repo_id | file_path | content
---|---|---|
CloudWhisperCustomBot | Makefile | # List of targets and their descriptions
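# Usage sketch (assumes the cloud_whisper_web container is already running):
#   make migrate    # generate a new Alembic revision, then
#   make upgrade    # apply it to the 'cloud_whisper' database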
.PHONY: help migrate upgrade downgrade head
help:
@echo "Note: The following commands will change the status of 'cloud_whisper' database.\n"
@echo "Available targets:"
@echo " migrate Create a new Migration File."
@echo " upgrade Upgrade to a later version."
@echo " downgrade Revert to a previous version."
@echo " head View current revision/version."
migrate:
@echo "Populate revision script with candidate migration operations, based on comparison of database to model."
@docker exec cloud_whisper_web bash -c "alembic revision --autogenerate"
upgrade:
echo "Upgrade to a later version."
@docker exec cloud_whisper_web bash -c "alembic upgrade head"
downgrade:
echo "Revert to a previous version."
@docker exec cloud_whisper_web bash -c "alembic downgrade -1"
head:
echo "View current revision/version"
@docker exec cloud_whisper_web bash -c "alembic current"
|
CloudWhisperCustomBot | Dockerfile | # Dockerfile
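# Build sketch (image tag taken from docker-compose.yml):
#   docker build -t cloud_whisper_custom_web .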
FROM python:3.11-slim-bullseye
# Set the working directory
WORKDIR /CloudWhisperCustomBot
COPY requirements.txt .
RUN apt-get update && apt-get install -y \
build-essential \
dumb-init \
curl \
lsb-release \
wget \
cmake \
libpq-dev gcc python3-dev \
&& rm -rf /var/lib/apt/lists/*
RUN pip3 install --upgrade pip && pip3 install -r requirements.txt
# Copy the app source code
COPY . /CloudWhisperCustomBot
RUN chmod 755 /CloudWhisperCustomBot/scripts/dev.sh
ENV PYTHONPATH=/CloudWhisperCustomBot
ENTRYPOINT ["dumb-init", "--"]
CMD ["/bin/bash", "-c", "/CloudWhisperCustomBot/scripts/dev.sh"]
|
CloudWhisperCustomBot | docker-compose.yml | services:
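# Stack overview: React frontend, FastAPI web app, Celery workers and beat scheduler,
# Redis broker, Postgres (+ pgAdmin), Neo4j, Qdrant, and an nginx reverse proxy.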
cloud_whisper_fe:
container_name: cloud_whisper_fe
build: ../cloud-whisper-frontend
command: npm run build
environment:
REACT_APP_API_URL: https://cloudwhisper-stage.wanclouds.ai/
REACT_APP_AUTH_REDIRECT_URI: https://cloudwhisper-stage.wanclouds.ai/users/wc/callback
REACT_APP_WEBSOCKETS_STATUS: enabled
REACT_APP_WEBSOCKETS_URL: wss://cloudwhisper-stage.wanclouds.ai/v1/whisper/websockets/whisper-inference
REACT_APP_GOOGLE_AUTH_SSO_REDIRECTION_URI: https://cloudwhisper-stage.wanclouds.ai/
REACT_APP_GOOGLE_AUTH_SSO_STATUS: enabled
REACT_APP_WANCLOUDS_AUTH_SSO_STATUS: disabled
REACT_APP_PAYMENT_CALLBACK_URI_PATH: https://cloudwhisper-stage.wanclouds.ai
REACT_APP_DRAAS_BOT_STATUS: 'enabled'
REACT_APP_DRAAS_BOT_URI: https://cloudwhisper-stage.wanclouds.ai/ # Main page; comment out if not needed
REACT_APP_AUTH_URL: https://accounts-stage.wanclouds.net/
ports:
- "3000:3000"
networks:
- cloud_whisper_custom_bot
cloud_whisper_web:
container_name: cloud_whisper_web
env_file:
- "./.env.web"
- "./.env.aws_configurations"
- "./.env.anthropic_apikey"
- "./.env.postgres"
- "./.env.groq_apikey"
- "./.env.base_bot_secrets"
- "./.env.neo4j"
restart: always
build:
context: .
dockerfile: Dockerfile
image: cloud_whisper_custom_web
environment:
BASE_BOT_URL: "https://wanclouds.ai/v1/whisper/bots/{BASE_BOT_ID}/qna_chats"
AUTH_LINK: https://vpc-stage.wanclouds.net
BACKEND_URI: https://cloudwhisper-stage.wanclouds.ai
ports:
- "8008:8008"
expose:
- "8008"
depends_on:
- postgresdb
volumes:
- ./app:/CloudWhisperCustomBot/app
- ./migrations:/CloudWhisperCustomBot/migrations
- ./cache:/CloudWhisperCustomBot/cache
- ./cache/huggingface:/root/.cache/huggingface
networks:
- cloud_whisper_custom_bot
nginx:
image: wancloudsinc/doosra-vpc-nginx:latest
container_name: nginx
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf
networks:
- cloud_whisper_custom_bot
qdrant:
image: qdrant/qdrant:v1.11.3
container_name: qdrant
ports:
- "6333:6333"
- "6334:6334"
volumes:
- ./qdrant_data:/qdrant/storage
networks:
- cloud_whisper_custom_bot
neo4j:
image: neo4j:5.19.0
container_name: neo4j
ports:
- "7474:7474"
- "7687:7687"
volumes:
- neo4jdata:/data
environment:
- NEO4J_AUTH=neo4j/72054321
- NEO4J_PLUGINS=["apoc"]
- NEO4J_apoc_export_file_enabled='true'
- NEO4J_apoc_import_file_enabled='true'
- NEO4J_apoc_import_file_use__neo4j__config='true'
networks:
- cloud_whisper_custom_bot
discovery_worker:
env_file:
- "./.env.postgres"
build:
context: .
dockerfile: ./Dockerfile
image: cloud_whisper_custom_web
entrypoint: ./scripts/discovery_worker.sh
container_name: discovery_worker
links:
- redis
depends_on:
- redis
- cloud_whisper_web
- postgresdb
environment:
- NEO4J_URI=bolt://neo4j:7687
volumes:
- .:/CloudWhisperCustomBot
restart: always
networks:
- cloud_whisper_custom_bot
cloud_whisper_worker:
build:
context: .
dockerfile: ./Dockerfile
image: cloud_whisper_custom_web
entrypoint: ./scripts/worker.sh
container_name: cloud_whisper_worker
links:
- redis
- postgresdb
depends_on:
- redis
volumes:
- .:/CloudWhisperCustomBot
restart: always
networks:
- cloud_whisper_custom_bot
beat:
build:
context: .
dockerfile: ./Dockerfile
image: cloud_whisper_custom_web
entrypoint: ./scripts/beat.sh
container_name: beat
links:
- redis
depends_on:
- redis
- cloud_whisper_web
volumes:
- .:/CloudWhisperCustomBot
restart: always
networks:
- cloud_whisper_custom_bot
redis:
image: redis:latest
container_name: redis
networks:
- cloud_whisper_custom_bot
postgresdb:
image: postgres:16
env_file:
- "./.env.postgres"
container_name: postgresdb
environment:
POSTGRES_USER: admin
POSTGRES_PASSWORD: admin123
POSTGRES_DB: cloud_whisper
PGDATA: /data/postgres
ports:
- "5432:5432"
volumes:
- dbdata:/data/postgres
networks:
- cloud_whisper_custom_bot
pgadmin:
image: dpage/pgadmin4
container_name: pgadmin4
restart: always
ports:
- "8888:80"
environment:
PGADMIN_DEFAULT_EMAIL: admin@wanclouds.net
PGADMIN_DEFAULT_PASSWORD: admin123
PGADMIN_CONFIG_SERVER_MODE: 'False'
volumes:
- pgadmin-data:/var/lib/pgadmin
networks:
- cloud_whisper_custom_bot
volumes:
dbdata:
driver: local
pgadmin-data:
driver: local
redis_data:
driver: local
neo4jdata:
driver: local
networks:
cloud_whisper_custom_bot:
|
CloudWhisperCustomBot | alembic.ini | # A generic, single database configuration.
[alembic]
# path to migration scripts.
# Use forward slashes (/) also on windows to provide an os agnostic path
script_location = migrations
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python>=3.9 or backports.zoneinfo library.
# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
# string value is passed to ZoneInfo()
# leave blank for localtime
# timezone =
# max length of characters to apply to the "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to migrations/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:migrations/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = driver://user:pass@localhost/dbname
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
# hooks = ruff
# ruff.type = exec
# ruff.executable = %(here)s/.venv/bin/ruff
# ruff.options = --fix REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
|
CloudWhisperCustomBot | pyproject.toml | [tool.ruff]
line-length = 120
target-version = "py311"
[tool.ruff.format]
indent-style = "tab"
quote-style = "double"
[tool.poetry]
name = "CloudWhisperCustomBot"
version = "0.1.0"
description = "Chat with your API in Natural Language"
authors = ["syedfurqan <syedfurqan@wanclouds.net>"]
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.poetry.dependencies]
python = "^3.9"
aiohttp = "^3.9.1"
urllib3 = "^2.1.0"
transformers = "^4.36.2"
langchain-community = "^0.0.12"
loguru = "^0.7.2"
fastapi = "^0.109.0"
uvicorn = "^0.25.0"
edgedb = "^1.8.0"
python-dotenv = "^1.0.0"
openapi-pydantic = "^0.4.0"
torch = "^2.1.2"
peft = "^0.7.1"
langchain = "^0.1.0"
[tool.poetry.group.dev.dependencies]
pytest = "^7.4.4"
ruff = "^0.1.13"
|
CloudWhisperCustomBot | requirements.txt | aiohttp==3.9.0
alembic==1.9.0
alembic_postgresql_enum==1.3.0
anthropic==0.34.2
asyncpg==0.27.0
bcrypt==4.1.3
celery==5.3.1
celery-singleton==0.3.1
faiss-cpu==1.7.4
fastapi==0.104.1
httpx==0.27.0
langchain==0.0.351
langchain-community==0.0.3
langchain-core==0.1.1
loguru==0.7.2
llama-index==0.10.58
llama-index-vector-stores-qdrant==0.2.8
mailchimp-transactional==1.0.46
neo4j==5.21.0
openai==1.37.0
openapi-schema-pydantic==1.2.4
boto3==1.35.34
openapi_pydantic==0.3.2
pydantic==1.10.13
pydantic_core==2.4.0
python-dotenv==1.0.0
python-multipart==0.0.9
pycryptodome==3.20.0
qdrant-client==1.8.0
redis==4.5.4
scipy==1.11.1
sentence-transformers==2.3.1
sse-starlette==1.6.5
sqlalchemy-utils==0.41.2
sqlalchemy[asyncio]==2.0.31
uvicorn==0.24.0.post1
uvicorn[standard]==0.24.0.post1
groq==0.11.0
|
CloudWhisperCustomBot | README.md | # CloudWhisperCustomBot |
CloudWhisperCustomBot | app/main.py | from contextlib import asynccontextmanager
from fastapi import APIRouter, FastAPI
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from qdrant_client import models
from qdrant_client.http.exceptions import UnexpectedResponse
from app.core.config import settings, setup_app_logging, neo4j_driver
from app.web import api_router
from app.worker.cypher_store import create_qdrant_client
# Define root API router
root_router = APIRouter()
async def get_neo4j_session(driver):
session = driver.session()
return session
async def check_qdrant_collection():
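"""Ensure the 'cypher_queries' Qdrant collection exists, creating it (1536-dim, cosine) if missing."""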
qd_client = create_qdrant_client(location=settings.qdrant.QDRANT_LOCATION,
api_key=settings.qdrant.QDRANT_API_KEY,
url=settings.qdrant.QDRANT_URL)
collection_name = 'cypher_queries'
try:
qd_client.get_collection(collection_name)
logger.info(f"Collection '{collection_name}' exists.")
except UnexpectedResponse as e:
logger.info(e)
qd_client.create_collection(
collection_name=collection_name,
vectors_config=models.VectorParams(size=1536, distance=models.Distance.COSINE),
)
logger.info('Qdrant collection created successfully')
# Validate configuration on startup
try:
settings.base_bot.validate()
except ValueError as e:
logger.error(f"Configuration error: {e}")
raise SystemExit(1)
# Asynchronous context manager for application startup
@asynccontextmanager
async def startup(app: FastAPI):
setup_app_logging(config=settings)
app.state.vector_store_index = settings.vector_store.create_vector_store_index()
app.state.neo4j_session = await get_neo4j_session(driver=neo4j_driver)
app.state.qdrant_collection = await check_qdrant_collection()
yield
# Create FastAPI application instance
app = FastAPI(
lifespan=startup,
title="Cloud Whisper API",
openapi_url=f"{settings.URL_PREFIX}/openapi.json",
docs_url=settings.DOCS_URL
)
# Add CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Include routers
app.include_router(api_router, prefix=settings.URL_PREFIX)
app.include_router(root_router)
|
CloudWhisperCustomBot | app/__init__.py | from app.api_discovery.discovery import discover_api_data
from app.worker.cypher_store import qdrant
from app.worker.scheduled_tasks import track_and_update_activity_status
__all__ = ["discover_api_data", "qdrant", "track_and_update_activity_status"]
|
CloudWhisperCustomBot | app/redis_scheduler.py | from datetime import timedelta
from celery import Celery
from celery.signals import worker_ready
from celery_singleton import clear_locks
from app.core.config import settings
broker = settings.redis.REDIS_URL
celery_app = Celery(
'whisper-celery',
broker=broker,
include=[
'app.api_discovery',
'app.worker.cypher_store',
'app.worker'
],
broker_connection_retry_on_startup=True
)
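# Beat schedule: run API discovery every minute and activity tracking every 10 seconds,
# both routed to the 'redis_queue' queue.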
celery_app.conf.beat_schedule = {
'run_discovery': {
'task': 'discover_api_data',
'schedule': timedelta(minutes=1),
'options': {'queue': 'redis_queue'}
},
'track_and_update_activity_task': {
'task': 'track_and_update_activity_status',
'schedule': timedelta(seconds=10),
'options': {'queue': 'redis_queue'}
},
}
@worker_ready.connect
def unlock_all(**kwargs):
clear_locks(celery_app)
|
CloudWhisperCustomBot | app/worker/scheduled_tasks.py | import asyncio
import httpx
import mailchimp_transactional as MailchimpTransactional
from celery_singleton import Singleton
from loguru import logger
from mailchimp_transactional.api_client import ApiClientError
from sqlalchemy import select
from app.models import Profile, ActivityTracking
from app.redis_scheduler import celery_app
from app.web.common.db_deps import AsyncSessionLocal, get_db_session_async_context
from ..api_discovery.utils import decrypt_api_key
from ..core.config import settings
from ..web.common.utils import update_activity_status
def run_async(coro):
loop = asyncio.get_event_loop()
return loop.run_until_complete(coro)
@celery_app.task(name="track_and_update_activity_status", base=Singleton, queue='redis_queue')
def track_and_update_activity_status():
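"""Poll VPC+ for the status of in-progress activities and email users when an activity completes or fails."""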
logger.info("<==============================================================================================>")
logger.info("<==================================== INITIATING ACTIVITY TRACKING ====================================>")
logger.info("<==============================================================================================>")
async def async_operation(query):
async with AsyncSessionLocal() as session:
result = await session.execute(query)
activities = result.scalars().all()
activities = [{'status': activity.status, 'email': activity.email, 'resource_name': activity.resource_name,
'activity_type': activity.activity_type, "id": activity.id, "resource_type": activity.resource_type,
"workflow_id": activity.workflow_id, "action_id": activity.action_id} for activity in activities]
return activities
async def profile_async_operation(query):
async with AsyncSessionLocal() as session:
result = await session.execute(query)
users = result.scalars().all()
users = [{'user_id': user.user_id, 'api_key': user.api_key, 'email': user.email, 'name': user.name,
'api_key_status': user.api_key_status or ''} for user in users]
return users
async def run_task():
query = select(ActivityTracking)
user_query = select(Profile)
activities = await async_operation(query)
users = await profile_async_operation(user_query)
if activities and users:
for activity in activities:
for user in users:
if not user['api_key']:
continue
if user.get('api_key_status') == settings.api_key_status.STATUS_INVALID:
continue
logger.info(f"apikey: {user['api_key']}, user_id: {user['user_id']}")
decrypted_api_key = decrypt_api_key(user['api_key']['API-KEY'])
headers = {"API-KEY": decrypted_api_key}
if activity["status"] in ActivityTracking.poling_statuses_list:
async with httpx.AsyncClient() as http_client:
if activity["activity_type"] == ActivityTracking.RESTORE and activity["resource_type"] in ActivityTracking.resource_types_list:
resp = await http_client.get(
f"{settings.web.AUTH_LINK}/v1/ibm/workspaces/{activity['workflow_id']}",
headers=headers)
else:
resp = await http_client.get(
f"{settings.web.AUTH_LINK}/v1/ibm/workflows/{activity['workflow_id']}",
headers=headers)
if resp and resp.status_code == 200:
async with get_db_session_async_context() as db_session:
await update_activity_status(activity_response=resp.json(),
activity_id=activity["id"])
updated_activity = (await db_session.scalars(select(ActivityTracking).filter(
ActivityTracking.workflow_id == activity['workflow_id']))).one_or_none()
recipients = [{"email": activity["email"], "type": "to"}]
if updated_activity.status == ActivityTracking.STATUS_C_SUCCESSFULLY:
send_activity_email.delay(email_to=recipients, user_name=user['name'],
resource_type=activity["resource_type"],
resource_name=activity["resource_name"],
activity_type=activity["activity_type"],
success=True, whisper_url=settings.web.BACKEND_URI)
if updated_activity.status in ActivityTracking.failure_statues:
send_activity_email.delay(email_to=recipients, user_name=user['name'],
resource_type=activity["resource_type"],
resource_name=activity["resource_name"],
activity_type=activity["activity_type"],
success=False, whisper_url=settings.web.BACKEND_URI)
else:
logger.info("NO ACTIVITY FOUND IN DATABASE")
asyncio.run(run_task())
@celery.task(name="send_activity_email", base=Singleton, queue='redis_queue')
def send_activity_email(email_to: list, user_name: str = "", resource_type: str = "", resource_name: str = "",
whisper_url: str = "", activity_type: str = "", success: bool = None) -> None:
"""
This function initializes the Mailchimp client and sends an email.
"""
# Handle special cases for activity type
if activity_type.lower() == "backup":
action_verb = "backed up"
elif activity_type.lower() == "restore":
action_verb = "restored"
else:
base = activity_type.lower()
# Default for other actions; avoids doubling a trailing 'e' (e.g. "migrate" -> "migrated")
action_verb = f"{base}d" if base.endswith("e") else f"{base}ed"
if success:
subject = f"{activity_type.capitalize()} completed: {resource_type}"
text = (f"Hey {user_name},\n\nYour {resource_type} ({resource_name}) has successfully been {action_verb}. "
f"Please visit the following link for further details:\n{whisper_url}\n\nThanks,\nWanclouds Inc.")
else:
subject = f"{activity_type.capitalize()} failed: {resource_type}"
text = (
f"Hey {user_name},\n\nUnfortunately, there was an issue with your {activity_type} attempt for"
f" {resource_name}."
f"Please check the details and retry or contact support for further assistance.\n"
f"Visit {whisper_url} for more information.\n\nThanks,\nWanclouds Inc.")
mailchimp = MailchimpTransactional.Client(settings.email.MANDRILL_API_KEY)
message = {
"from_email": settings.email.MAIL_USERNAME,
"subject": subject,
"text": text,
"to": email_to
}
try:
response = mailchimp.messages.send({"message": message})
logger.info('Email sent successfully: {}'.format(response))
except ApiClientError as error:
logger.error('An exception occurred: {}'.format(error.text))
|
CloudWhisperCustomBot | app/worker/cypher_store.py | import time
from llama_index.core import VectorStoreIndex, ServiceContext
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.schema import TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.qdrant import QdrantVectorStore
from loguru import logger
from qdrant_client import QdrantClient
from app.core.config import settings
from app.redis_scheduler import celery_app
def get_qdrant_pipeline():
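"""Build an ingestion pipeline that embeds nodes with OpenAI and writes them to the 'cypher_queries' collection."""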
qd_client = create_qdrant_client(location=settings.qdrant.QDRANT_LOCATION,
api_key=settings.qdrant.QDRANT_API_KEY,
url=settings.qdrant.QDRANT_URL)
collection_name = "cypher_queries"
vector_store = QdrantVectorStore(
client=qd_client,
collection_name=collection_name
)
pipeline = IngestionPipeline(
transformations=[
OpenAIEmbedding(api_key=settings.openai.OPENAI_API_KEY,
embed_batch_size=10, model="text-embedding-3-small")
],
vector_store=vector_store
)
return pipeline
def create_qdrant_client(location=None, url=None, api_key=None, timeout=None):
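"""Instantiate a QdrantClient, preferring an explicit url + api_key and falling back to a local location."""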
# Use the provided arguments or fall back to the default configuration
location = location or settings.qdrant.QDRANT_LOCATION
url = url or settings.qdrant.QDRANT_URL
api_key = api_key or settings.qdrant.QDRANT_API_KEY
timeout = timeout or settings.qdrant.QDRANT_TIME_OUT
# Directly instantiate the QdrantClient with the appropriate parameters
return QdrantClient(url=url, api_key=api_key, timeout=timeout) if url and api_key else QdrantClient(location,
timeout=timeout)
def qdrant_retrieval(query, k):
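"""Retrieve the top-k stored queries most similar to `query` from the 'cypher_queries' collection."""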
q_client = create_qdrant_client(location=settings.qdrant.QDRANT_LOCATION,
api_key=settings.qdrant.QDRANT_API_KEY,
url=settings.qdrant.QDRANT_URL)
vector_store = QdrantVectorStore(
client=q_client,
collection_name='cypher_queries'
)
llm = OpenAI(api_key=settings.openai.OPENAI_API_KEY)
service_context = ServiceContext.from_defaults(llm=llm,
embed_model=OpenAIEmbedding(
api_key=settings.openai.OPENAI_API_KEY,
embed_batch_size=10, model="text-embedding-3-small"))
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
retriever = index.as_retriever(similarity_top_k=k, **{"vector_store_query_mode": "text_search"})
docs_with_scores = retriever.retrieve(query)
return docs_with_scores
@celery_app.task(name="qdrant")
def qdrant(question, cypher):
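"""Store a (question, cypher) pair in the vector store unless a near-duplicate (score >= 0.9) already exists."""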
nodes = [TextNode(text=question, metadata={"cypher": str(cypher.strip())}, text_template='{content}')]
logger.info('<<<<<<<<<<<<<<<cypher and query>>>>>>>>>>>>>>>>>>>>')
logger.info(nodes)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
start_time = time.time()
docs_with_scores = qdrant_retrieval(question, 1)
end_time = time.time()
execution_time = end_time - start_time
logger.info('<<<<<<<<<<<<<<getting time>>>>>>>>>>>>>>>>>>>>')
logger.info(execution_time)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
logger.info('<<<<<<<<<<<<<<<docs_with_scores>>>>>>>>>>>>>>>>>>>>')
logger.info(docs_with_scores)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
if not docs_with_scores:
logger.info('No similar documents found.')
pipeline = get_qdrant_pipeline()
pipeline.run(nodes=nodes)
else:
score = docs_with_scores[0].score
logger.info('<<<<<<<<<<<<<<<SCORE>>>>>>>>>>>>>>>>>>>>')
logger.info(score)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
# Check if the similarity score is below the threshold
if score < 0.9:
logger.info("Similarity score below threshold. Adding new document to the vector store.")
start_time1 = time.time()
pipeline = get_qdrant_pipeline()
pipeline.run(nodes=nodes)
end_time1 = time.time()
execution_time1 = end_time1 - start_time1
logger.info('<<<<<<<<<<<<<<<Time taken to store the query>>>>>>>>>>>>>>>>>>>>')
logger.info(execution_time1)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
else:
logger.info("Similar cypher and query already exists in the vector database. Skipping save.")
|
CloudWhisperCustomBot | app/web/__init__.py | from fastapi import APIRouter
from app.web.chats import whisper_chats
from app.web.clouds import whisper_clouds
# from app.web.knowledge_graphs import whisper_knowledge_graphs
from app.web.profiles import whisper_profiles, router
from app.web.websockets import websockets_chats
from app.web.activity_tracking import activity_tracking_n_recommendations
api_router = APIRouter()
api_router.include_router(whisper_chats, prefix="/chats", tags=["Chat"])
api_router.include_router(whisper_profiles, prefix="/profiles", tags=["Profile"])
api_router.include_router(router, prefix="/user/profile", tags=["Profile"])
# api_router.include_router(whisper_knowledge_graphs, prefix="/knowledge_graph", tags=["Knowledge Graph"])
api_router.include_router(websockets_chats, prefix="/websockets", tags=["Websockets Chat"])
api_router.include_router(whisper_clouds, prefix="/clouds", tags=["Clouds"])
api_router.include_router(activity_tracking_n_recommendations, prefix="/activity-tracking-n-recommendations",
tags=["Activity Tracking & Recommendations"])
|
CloudWhisperCustomBot | app/web/profiles/schemas.py | from typing import Optional, Dict
from pydantic import BaseModel
from enum import Enum
class UpdateAppearanceRequest(BaseModel):
appearance: Optional[Dict] = None
class OnboardingStatus(str, Enum):
app_tour = "app_tour"
action_tour = "action_tour"
onboarded = "onboarded"
class UpdateOnboardingStatusRequest(BaseModel):
onboarding_status: OnboardingStatus
profile_id: str
|
CloudWhisperCustomBot | app/web/profiles/__init__.py | from .api import whisper_profiles, router
__all__ = ["whisper_profiles", "router"]
|
CloudWhisperCustomBot | app/web/profiles/api.py | from http import HTTPStatus
import httpx
from loguru import logger
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import JSONResponse
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
from app import models
from app.api_discovery.utils import update_profile_with_vpcplus_api_key, decrypt_api_key
from app.web.common.utils import update_appearance_in_db
from app.web.profiles.schemas import UpdateAppearanceRequest, UpdateOnboardingStatusRequest
from app.core.config import settings
from app.web.common import db_deps, deps
whisper_profiles = APIRouter()
router = APIRouter()
@whisper_profiles.post("/api-key", name="Add or Update VPC+ API Key")
async def add_api_key(
api_key: str,
user=Depends(deps.authenticate_user)
):
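"""Validate the given VPC+ API key against the auth service and store it on the user's profile."""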
headers = {"API-KEY": api_key}
async with db_deps.get_db_session_async_context() as db_session:
async with httpx.AsyncClient() as http_client:
resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/api_key", headers=headers)
if not resp or resp.status_code == 401:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail={"error": f"Invalid or Expired API Key '{api_key}' found."}
)
api_key_json = resp.json()
api_key_name = api_key_json.get("name")
api_key_expiry = api_key_json.get("expires_at")
profile = await update_profile_with_vpcplus_api_key(
profile_id=user['id'],
api_key=api_key, api_key_name=api_key_name, api_key_expiry=api_key_expiry
)
api_key = profile.api_key
return JSONResponse(content={"name": api_key['name'] or '', "expires_at": api_key['expires_at'] or ''},
status_code=200)
@whisper_profiles.get("/api-key", name="Get VPC+ API Key details")
async def get_api_key(
user=Depends(deps.authenticate_user)
):
from app.models import Profile
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
if not profile:
raise HTTPException(status_code=404, detail=f"User with ID {profile.user_id} not found")
api_key = profile.api_key
if not api_key:
raise HTTPException(status_code=204, detail="API Key not found")
decrypted_api_key = decrypt_api_key(api_key.get('API-KEY') or '')
return JSONResponse(content={"name": api_key['name'] or '', "key": decrypted_api_key or '',
"expires_at": api_key['expires_at'] or '', "last_updated_at": profile.last_updated_at.isoformat() if profile.last_updated_at else None }, status_code=200)
@router.get("")
async def get_user_details(
profile=Depends(deps.authenticate_user),
):
return profile
@router.patch("/appearance")
async def update_user_appearance(
setting: UpdateAppearanceRequest,
profile=Depends(deps.authenticate_user)
):
async with db_deps.get_db_session_async_context() as db_session:
updated_profile = await update_appearance_in_db(
profile=profile,
appearance=setting.appearance
)
if not updated_profile:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail=f"No User Profile with id {profile['uuid']} found"
)
return updated_profile.to_json()
@whisper_profiles.post("/onboarding", name="Update Onboarding Status")
async def add_onboarding_status(
request: UpdateOnboardingStatusRequest,
profile=Depends(deps.authenticate_user)
):
profile_id = request.profile_id
onboarding_status = request.onboarding_status
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(models.Profile).filter(models.Profile.user_id == profile_id))).one_or_none()
if not profile:
raise HTTPException(status_code=404, detail=f"Profile not found with id {profile_id}")
profile.onboarding = onboarding_status
await db_session.commit()
logger.info(f"Updated profile with ID {profile_id} to onboarding status {profile.onboarding}.")
return {
"detail": "Onboarding status updated successfully.",
"profile": profile.to_reference_json()
}
|
CloudWhisperCustomBot | app/web/common/consts.py | CREATED_AT_FORMAT_WITH_MILLI_SECONDS = '%Y-%m-%dT%H:%M:%S.%fZ'
|
CloudWhisperCustomBot | app/web/common/templates.py | ROUTE_TEMPLATE = """You are a team member of the 'Cloud Whisperer' project by Wanclouds, an expert Cloud Support Engineer specializing in cloud backups, disaster recovery, and migrations. Your expertise covers major public clouds (IBM Cloud, AWS, Google Cloud, Microsoft Azure) and Wanclouds' offerings. You assist potential customers with insights, queries, and guidance on backup, disaster recovery, and migration setups across these platforms.
Your task is to analyze the user's latest query along with the chat history to select the appropriate tool(s) for handling the request.
<available-tools>
Below are descriptions of the available tools. You must always use one of the listed tools.
1. QnA_or_Schedule_a_call: Use this tool for all general inquiries, questions, and requests for information regarding the VPC+ product, cloud services, or migration topics. This includes inquiries about migration alerts, requirements, procedures, or general datacenter updates. Additionally, use this tool when the user wants to schedule a call with the Wanclouds team.
Examples:
- "What is DRaaS?"
- "Tell me about your migration offerings."
- "How does cloud migration work?"
- "What are the benefits of migrating to the cloud?"
- "I received a notification about migrating from a specific datacenter, is this true?"
- "Do I need to migrate my resources from a particular IBM datacenter?"
- "What's the timeline for datacenter migrations?"
- "How do I initiate the migration process for my classic infrastructure?"
- "Can you help me move my workloads from the legacy datacenter to the new MZR?"
- "I want to migrate from dal09 dc, can you help me move my workloads"
- "I would like to schedule a call"
- "I want to setup a meeting with wanclouds team"
- "Can i discuss this over a call?"
- "I need a call to discuss my migration requirements"
- "How can i schedule a call with Wanclouds?"
2. Action: Use when the user intends to perform actions on the VPC+ product, such as creating, modifying, or deleting resources.
Examples:
- "Create a new VPC"
- "Delete my cloud account"
- "Modify my backup settings"
- "I want to create a one-time backup of my IBM VPC."
- "I want to set up scheduled backups for my IBM VPC."
- "I want to back up my Cloud Object Storage (COS) bucket once."
- "I want to schedule regular backups for my Cloud Object Storage (COS) bucket."
- "I want to restore a VSI backup in the same region and VPC."
- "I want to schedule backups for my IBM instance (VSI)."
- "I want to restore a backup for my Cloud Object Storage (COS) bucket."
- "I want to restore a backup for my IBM VPC."
- "I want to create a one-time backup for my IBM IKS cluster."
- "I want to set up scheduled backups for my IBM IKS clusters."
3. ClassicMigration: This tool SHOULD ONLY BE USED WHEN THE USER INTENDS TO PERFORM THE MIGRATION. It should not be used when the user is asking for general help or wants to schedule a call related to migration.
Examples:
- "I want to migrate my resources from dal09 to dal10"
- "I want to migrate my workloads from data center dal9"
4. DataExplorer: Use when retrieving or analyzing specific data from the user's VPC+ deployment across supported cloud platforms, including resources that are idle or need rightsizing.
Examples:
- "How many VPCs do I have?"
- "Show my cloud accounts"
- "show me my snapshots"
- "show me my resources that are not backed up"
- "show me the snapshots that are idle"
- What are my idle resources?
- Which services are costing me?
- I want a report in my monthly spending.
- What are the instance names that need to be rightsized?
- Show me my recommendations.
- What idle resource do I have?
- What is the total cloud spending for the analyzed month?
- How many cost optimization recommendations were identified?
- What is the potential savings amount if the recommendations are implemented?
- What are the top 5 services contributing to the cloud spending?
- Which service category accounts for the largest portion of the total spending?
- What percentage of total costs do Kubernetes and Bare Metal services combined represent?
- How does the current month's spending compare to the previous month?
- What is the percentage difference between the current month's spending and the 12-month average?
- What types of older resources continue to incur costs?
- What are the main recommendations for cost optimization?
- Which service has the highest percentage of total spending?
- What is the cost for Log Analysis services?
- What is the overall trend in cloud spending over the past year?
- What percentage of total spending is attributed to database services?
- How many months of data are considered in the 12-month average?
- What areas are suggested for potential cost savings without compromising operational efficiency?
- "Backup recommendations."
- "Give me a recommendation for backing up my infrastructure."
- "Recommend which resources need backup in my IBM cloud."
- "Show me my backup recommendations."
- "I need a backup suggestion for my IBM resources."
- "Can you identify any resources that should be backed up immediately?"
</available-tools>
<tool-selection-process>
Use these instructions for the Tool Selection Process:
1. Analyze the complete user query (chat history + latest query).
2. Identify key phrases or intentions that align with specific tools.
3. For ANY migration-related queries that involve questions, notifications, or general information, always use QnA_or_Schedule_a_call.
4. Consider the user's expertise level based on their language and questions.
5. If multiple tools seem applicable, prioritize based on the primary intention of the query.
6. For complex queries, consider breaking them down into subtasks and selecting tools for each.
7. Always select one tool from the tools provided to you.
</tool-selection-process>
<chat_history>
{chat_history}
</chat_history>
<user_query>
{query}
</user_query>
Please provide your response in the following format:
complete_user_query: [Combine the chat history and the user's latest query into a standalone query that includes all the information provided by the user, phrased in the user's voice]
Tool: [Select 'QnA_or_Schedule_a_call', 'DataExplorer', 'ClassicMigration', or 'Action' after determining the latest intent of the user from the chat history and user's latest response]
Explanation: [Provide a clear explanation for your tool selection, referencing specific parts of the user's query and chat history that led to your decision, including:
1. Specific keywords or phrases from the user's query that influenced your decision
2. How the selected tool(s) best address the user's primary intention
3. If applicable, why you chose one tool over another for similar use cases
4. How you considered the user's expertise level in your selection]
If the query is ambiguous or requires clarification, state this in your explanation and suggest what additional information would be helpful to make a more accurate tool selection."""
NARROW_DOWN_INTENT = """You are a team member of the 'Cloud Whisperer' project by Wanclouds, your job is to detects intent from user queries. Your task is to classify the user's intent based on their query. Below are the possible intents with brief descriptions. Use these to accurately determine the user's goal, and output only the intent topic. When a user starts a conversation or asks a question, follow these steps:
<instructions>
1. Greet new users warmly to establish rapport if this is the user's first message and the chat history is empty.
2. Review the user's chat history and latest request thoroughly.
3. Determine if the request matches any specific actions in the "available actions" section.
4. If the request is unclear or could match multiple actions:
- Acknowledge any ambiguity politely.
- Engage with the user to gather more details.
- Present potential matching options from the actions list.
5. If the user's request seems to be outside the scope of the available actions:
- Politely inform the user that the requested action is not currently available.
- Offer to assist with any of the actions from the existing list.
6. Maintain clear, concise, and professional communication throughout.
7. When a single intent is found, set 'Task Finished' to true and do not ask any further questions.
8. Only set 'Task Finished' to true when a single, specific action is identified.
9. Ensure 'Task Finished' remains False if multiple actions could potentially match the user's intent, prompting further clarification.
10. When a request could match multiple actions, list all potential options and ask the user to specify.
11. Do not ask for additional details beyond what's needed to identify the action.
12. Before finalizing an intent, check the Coming Soon actions for any similar or related intents.
13. If a user's query could match both an available action and a Coming Soon action, do not narrow down to a single intent. Instead, list both possibilities and set 'Task Finished' to false.
14. When a user's query is ambiguous (e.g., doesn't specify between one-time or scheduled actions), do not assume. List all relevant intents and set 'Task Finished' to false.
15. Be cautious about inferring details not explicitly stated by the user. When in doubt, it's better to ask for clarification than to narrow down incorrectly.
</instructions>
<cloud_whisperer_introduction>
'Cloud Whisperer', by Wanclouds, is an expert Cloud Support Engineer specializing in cloud backups, disaster recovery, and migrations. Your expertise covers major public clouds (IBM Cloud, AWS, Google Cloud, Microsoft Azure) and Wanclouds' offerings. You assist potential customers with insights, queries, and guidance on backup, disaster recovery, and migration setups across these platforms.
</cloud_whisperer_introduction>
<actions>
Available actions:
1. IBM Classic or Legacy Data Center Migration: As part of IBM Datacenter Modernization initiative, IBM is closing older and classic legacy datacenters and migrating customers to the new data centers or multi-zone regions (also called MZR). Dal09 or Dal9 or Dallas 9 is currently affected and customers will have to move to another datacenter or VPC. IBM Cloud has partnered with Wanclouds to assist with these migrations. Customers will need to migrate servers including both Bare Metal or physical servers, virtual servers or machines called VSIs or VMs, Firewalls, Loadbalancers.
2. Create one-time IBM VPC backup: One-time backup of an IBM VPC blueprint, including all configurations and architecture details.
3. Create scheduled IBM VPC backup: Set up periodic backups of an IBM VPC blueprint, including all configurations and architecture details, with customizable backup policies.
4. Restore IBM VPC backup: Restore IBM VPC backups.
5. Create one-time Cloud Object Storage (COS) bucket backup: One-time backup of Cloud Object Storage (COS) buckets.
6. Create scheduled Cloud Object Storage (COS) bucket backup: Set up periodic backups for IBM COS buckets with customizable policies.
7. Restore Cloud Object Storage (COS) bucket backup: Restore IBM Cloud Object Storage (COS) bucket backups.
8. Create one-time IBM IKS cluster backup: One-time backup of IBM Kubernetes Service (IKS) clusters.
9. Create scheduled IBM IKS cluster backup: Set up periodic backups for IBM IKS clusters with customizable policies.
10. Create scheduled IBM instance (VSI) backup: Set up periodic backups for an IBM instance with policies.
11. Restore IBM VSI backup in same region and VPC: Restore IBM VSI backup in same region and VPC.
12. Restore IBM VSI backup in different region and VPC: Restore IBM VSI backup in different region and VPC
13. Restore IKS backup in an existing cluster: Restore IKS backup in an existing IKS cluster.
14. Restore IBM IKS Cluster backup in existing vpc: Restore IBM IKS Cluster backup in existing VPC.
</actions>
<coming soon actions>
Coming soon actions:
1. Restore VSI backup using custom template: Restore VSI backup using custom template meaning the user can restore VSI backup in different vpc in same or different region.
2. Create one-time IBM Virtual Server Instance (VSI) Backup: One-time backup of IBM Virtual Server instances.
</coming soon actions>
<chat_history>
{chat_history}
</chat_history>
<user_query>
{query}
</user_query>
Please strictly adhere to the following template format:
Thought: [Take a moment to relax and start analyzing the chat history and the latest user query. Determine which Actions or Coming Soon Actions the user's request matches. Do not make assumptions]
Intent Narrowed: [Action(s) from action list, coming soon list that can be narrowed down based on the user chat]
Task Analysis: [Analyze the "Intent Narrowed" section carefully. If more than one action is narrowed down, mark the task as false; if the only narrowed-down action is from the coming soon list, also mark the task as false; otherwise decide accordingly]
Task Finished: [Mark it True/False only if action narrowed down to one of the ActionsList]
Response: {response}"""
CONFIDENCE_SCORING_BOT = """You are a confidence scoring assistant for intent classification, a team member of the 'Cloud Whisperer' project by Wanclouds. Your job is to analyze the conversation, the single narrowed-down intent provided by the intent narrowing bot, along with the intents list and coming soon actions. You will assign a confidence score based on how well the query and context match the intent.
<Instructions>
1. Carefully read the conversation, list of Intents, coming soon actions, and the narrowed-down intent.
2. Analyze the semantic similarity between the query, chat context, and the intent description.
3. Consider any specific keywords or phrases that strongly indicate the particular intent.
4. Assess how well the intent aligns with the overall conversation context.
5. Check if there are any similar or conflicting actions in the Coming Soon list.
6. Assign a confidence score between 0 and 100 for the narrowed-down intent.
7. Provide a brief explanation for your scoring.
8. Use the following confidence levels:
- High: 90-100
- Moderate: 70-89
- Low: 0-69
9. If there's a similar action in Coming Soon, reduce the confidence score appropriately and highlight this in your explanation.
10. If the user's query is ambiguous or could match multiple intents (including Coming Soon actions), assign a lower confidence score and recommend clarification.
</Instructions>
<cloud_whisperer_introduction>
'Cloud Whisperer', by Wanclouds, is an expert Cloud Support Engineer specializing in cloud backups, disaster recovery, and migrations. Your expertise covers major public clouds (IBM Cloud, AWS, Google Cloud, Microsoft Azure) and Wanclouds' offerings. You assist potential customers with insights, queries, and guidance on backup, disaster recovery, and migration setups across these platforms.
</cloud_whisperer_introduction>
<conversation>
Conversation:
{chat_history}
</conversation>
<intents_list>
List of Intents that were provided to narrowing down bot:
1. IBM Classic or Legacy Data Center Migration: As part of IBM Datacenter Modernization initiative, IBM is closing older and classic legacy datacenters and migrating customers to the new data centers or multi-zone regions (also called MZR). Dal09 or Dal9 or Dallas 9 is currently affected and customers will have to move to another datacenter or VPC. IBM Cloud has partnered with Wanclouds to assist with these migrations. Customers will need to migrate servers including both Bare Metal or physical servers, virtual servers or machines called VSIs or VMs, Firewalls, Loadbalancers.
2. Create one-time IBM VPC backup: One-time backup of an IBM VPC, including all configurations and resources.
3. Create scheduled IBM VPC backup: Set up periodic backups for an IBM VPC with customizable policies.
4. Restore IBM VPC backup: Restore IBM VPC backups.
5. Create one-time Cloud Object Storage (COS) bucket backup: One-time backup of Cloud Object Storage (COS) buckets.
6. Create scheduled Cloud Object Storage (COS) bucket backup: Set up periodic backups for IBM COS buckets with customizable policies.
7. Restore Cloud Object Storage (COS) bucket backup: Restore IBM Cloud Object Storage (COS) bucket backups.
8. Create one-time IBM IKS cluster backup: One-time backup of IBM Kubernetes Service (IKS) clusters.
9. Create scheduled IBM IKS cluster backup: Set up periodic backups for IBM IKS clusters with customizable policies.
10. Create scheduled IBM instance (VSI) backup: Set up periodic backups for an IBM instance with policies.
11. Restore IBM VSI backup in same region and VPC: Restore IBM VSI backup in same region and VPC.
12. Restore IBM VSI backup in different region and VPC: Restore IBM VSI backup in different region and VPC
13. Restore IKS backup in an existing cluster: Restore IKS backup in an existing IKS cluster.
14. Restore IBM IKS Cluster backup in existing vpc: Restore IBM IKS Cluster backup in existing VPC.
</intents_list>
<coming soon actions>
Coming soon actions:
1. Restore VSI backup using custom template: Restore VSI backup using custom template meaning the user can restore VSI backup in different vpc in same or different region.
2. Create one-time IBM Virtual Server Instance (VSI) Backup: One-time backup of IBM Virtual Server instances.
</coming soon actions>
<intent_narrowed_down>
Narrowed Intent: {narrowed_intent}
</intent_narrowed_down>
Please respond in the following format:
Analysis: [Take a moment to relax and start carefully analyzing the conversation, intent narrowed, intent list and coming soon actions. Highlight any ambiguities or conflicts with Coming Soon actions. If the user's query is ambiguous or could match multiple intents like (Restore VPC Backup & Restore IKS Cluster in existing VPC), assign a lower confidence score and recommend clarification]
Confidence Score: [Score] - [Brief explanation, including any impact from Coming Soon actions]
Overall Confidence: [High/Moderate/Low]
Recommendation: [Whether to proceed without confirmation, use implicit confirmation, ask for explicit confirmation, or seek clarification on specific points]"""
NARROW_DOWN_MIGRATION_INTENT = """
You are a team member of the 'Cloud Whisperer' project by Wanclouds, and your job is to detect intent from user queries. Your task is to classify the user's intent based on their query. Below are the possible intents with brief descriptions. Use these to accurately determine the user's goal, and output only the intent topic. When a user starts a conversation or asks a question, follow these steps:
<instructions>
1. Greet new users warmly to establish rapport if this is the user's first message and the chat history is empty.
2. Review the user's chat history and latest request thoroughly.
3. Determine if the request matches any specific actions in the "available actions" section.
4. If the request is unclear or could match multiple actions:
- Acknowledge any ambiguity politely.
- Engage with the user to gather more details.
- Present potential matching options from the actions list.
5. If the user's request seems to be outside the scope of the available actions:
- Politely inform the user that the requested action is not currently available.
- Offer to assist with any of the actions from the existing list.
6. Maintain clear, concise, and professional communication throughout.
7. When a single intent is found, set 'Task Finished' to true and do not ask any further questions.
8. Only set 'Task Finished' to true when a single, specific action is identified.
9. Ensure 'Task Finished' remains False if multiple actions could potentially match the user's intent, prompting further clarification.
10. When a request could match multiple actions, list all potential options and ask the user to specify.
11. Do not ask for additional details beyond what's needed to identify the action.
</instructions>
<cloud_whisperer_introduction>
'Cloud Whisperer', by Wanclouds, is an expert Cloud Support Engineer specializing in cloud backups, disaster recovery, and migrations. Your expertise covers major public clouds (IBM Cloud, AWS, Google Cloud, Microsoft Azure) and Wanclouds' offerings. You assist potential customers with insights, queries, and guidance on backup, disaster recovery, and migration setups across these platforms.
</cloud_whisperer_introduction>
<actions>
Available actions:
1. START IBM Classic or Legacy Data Center Migration: In this the user can start the pre migration step of selecting their workloads and then scheduling a call with the Wanclouds Migration Team.
2. Schedule a meeting for IBM Classic or Legacy Data Center Migration: In this the user can directly schedule a meeting and then discuss everything related to migration on call with the Wanclouds Migration Team.
</actions>
<examples>
Suppose the Actions are:
1. Enable email notifications: This Action enables email notifications for your alerts. As soon as there is an alert, you'll get an email.
2. Enable SMS notifications: This Action enables SMS notifications for your alerts. As soon as there is an alert, you'll get an SMS.
3. Disable email notifications: This Action disables email notifications for your alerts.
4. Disable SMS notifications: This Action disables SMS notifications for your alerts.
and coming soon actions are
1. change email password
<example0>
<chat_history>
</chat_history>
<user_query>
I want to change my email password.
</user_query>
You should respond as:
Thought: This is the user's first message and chat history is empty, so a greeting is appropriate. The user query doesn't match any existing actions in the available list. It is categorized under "Coming Soon" actions.
Intent Narrowed:
- Change Email Password
Task Analysis: This task is currently under development and is not available at the moment.
Task Finished: False
Response: Hello and welcome! I'm here to assist you. Regarding your request to change your email password, this feature is currently being developed and will be available soon. Please check back later for updates. In the meantime, if you need any assistance with other features, feel free to ask!
<example1>
<chat_history>
</chat_history>
<user_query>
I want to turn on notifications.
</user_query>
You should respond as:
Thought: This is the user's first message and chat history is empty, so a greeting is appropriate. The user wants to enable notifications, but it's not clear if they want email or SMS notifications.
Intent Narrowed:
- Enable email notifications
- Enable SMS notifications
Task Analysis: The task is not finished as we have more than one intent
Task Finished: False
Response: Hello and welcome! I'm here to help you with turning on notifications. Could you please specify whether you'd like to enable email notifications or SMS notifications?
<example2>
<chat_history>
<user>I want to turn on notifications.</user>
<assistant>Hello and welcome! I'm here to help you with turning on notifications. Could you please specify whether you'd like to enable email notifications or SMS notifications?</assistant>
</chat_history>
<user_query>
I prefer email notifications.
</user_query>
You should respond as:
Thought: The user has clarified that they want to enable email notifications. This is not their first message, so no greeting is needed.
Intent Narrowed: Enable email notifications
Task Analysis: The task can now be marked as finished as there is a single intent in the Narrow Down section
Task Finished: True
Response: Sure, I can help you with that.
<example3>
<chat_history>
<user>How do I manage my account settings?</user>
<assistant>I'd be happy to help you with managing your account settings. Could you please specify which particular setting you'd like to adjust? For example, we can help with notification preferences.</assistant>
<user>Oh, I see. In that case, I'd like to adjust my notifications.</user>
<assistant>Certainly! I can help you with adjusting your notifications. Would you like to enable or disable notifications? And do you prefer email or SMS notifications?</assistant>
</chat_history>
<user_query>
Hello, I want to turn on notifications. Turn on notifications for sms service
</user_query>
You should respond as:
Thought: The user wants to enable SMS notifications. This is not their first message, so no greeting is needed.
Intent Narrowed: Enable SMS notifications
Task Analysis: The task can now be marked as finished as there is a single intent in the Narrow Down section
Task Finished: True
Response: Certainly, I can help you enable SMS notifications for your alerts.
<example4>
<chat_history>
<user>Tell me about your product</user>
<assistant>Hello, I'm your support agent for ABC product....</assistant>
</chat_history>
<user_query>
Can you send an email to Ali that you need to attend a meeting with customer
</user_query>
You should respond as:
Thought: The user wants to send an email which is not in actionslist or coming soon, so it's out of context. This is not their first message, so no greeting is needed.
Intent Narrowed: []
Task Analysis: Task should be marked as False as user query is out of context
Task Finished: False
Response: I apologize, but I'm not able to send emails on your behalf. My capabilities are limited to helping with specific actions related to our product, such as managing notifications. Is there anything related to our product that I can assist you with?
</example4>
</examples>
<chat_history>
{chat_history}
</chat_history>
<user_query>
{query}
</user_query>
Please strictly adhere to the following template format:
Thought: [Analyze the chat history and the latest user query in relation to the Actions/ComingSoon. Do not make assumptions]
Intent Narrowed: [Action(s) from action list that are narrowed down based on the user chat]
Task Analysis: [Analyze "Intent Narrowed" section carefully]
Task Finished: [Mark it True/False only if action narrowed down to one of the ActionsList]
Response: [Your response to the user, including any necessary questions or confirmations]
"""
KNOWLEDGE_GRAPH_PROMPT = """ You are a helpful assistant tasked with extracting 'NEW' Nodes and relationships from
the Open API spec of API responses for the neo4j knowledge graph. It's crucial to avoid duplicates from the existing
nodes.
###Example1:
```
"Existing Nodes":["CarVault": "A mix of cars gathered from our database"]
*existing node description is same as vehicleCollection thats why it was not included in list of nodes in response.
input :"200": {
"content": {
"application/json": {
"schema": {
"properties": {
"vehicleCollection": {
"type": "array",
"items": {
"type": "object",
"properties": {
"vehicleId": {
"type": "string",
"description": "Unique identifier for the vehicle."
},
"make": {
"type": "string",
"description": "Manufacturer of the vehicle."
},
"model": {
"type": "string",
"description": "Model of the vehicle."
},
"year": {
"type": "integer",
"description": "Manufacturing year of the vehicle."
},
"owner": {
"type": "object",
"properties": {
"userId": {
"type": "string",
"description": "Identifier for the owner of the vehicle."
},
"name": {
"type": "string",
"description": "Name of the vehicle owner."
},
"contactDetails": {
"type": "object",
"properties": {
"emailAddress": {
"type": "string",
"description": "Email address of the owner."
},
"phoneNumber": {
"type": "string",
"description": "Phone number of the owner."
}
},
"required": ["emailAddress", "phoneNumber"]
}
},
"required": ["userId", "name", "contactDetails"]
}
},
"required": ["vehicleId", "make", "model", "year", "owner"]
}
},
"pagination": {
"type": "object",
"properties": {
"current": {
"type": "integer",
"description": "Current page of the vehicle collection."
},
"perPage": {
"type": "integer",
"description": "Number of vehicle records per page."
},
"total": {
"type": "integer",
"description": "Total number of vehicle records available."
}
},
"required": ["current", "perPage", "total"]
}
},
"required": ["vehicleCollection", "pagination"]
}
}
},
"description": "Successful response"
}
}
Response should be:
"Nodes": {
"Vehicle": "A single vehicle, typically associated with an owner.",
"Owner": "The individual or entity that legally possesses the vehicle.",
"ContactDetails": "Contact information of the vehicle owner, including email and phone number.",
"Pagination": "Information about the pagination of the vehicle list."
},
"Relationships": [
{"source": "Vehicle", "target": "Owner", "relationship": "OWNED_BY"},
{"source": "Owner", "target": "ContactDetails", "relationship": "HAS_CONTACT_INFO"}
],
"Location": {
"VehicleCollection": "vehicleCollection[*]",
"Vehicle": "vehicleCollection[*]",
"Owner": "vehicleCollection[*].owner",
"ContactDetails": "vehicleCollection[*].owner.contactDetails",
"Pagination": "pagination"
},
"Attribute": {
"Vehicle": ["vehicleId","make","model","year"]
"Owner": ["userId","name"]
"ContactDetails": ["emailAddress","PhoneNumber"]
"Pagination": ["current","perPage","total"]
}
}
```
"Existing Nodes": {nodes}
Instructions:
1. Begin by analyzing previously used nodes and successful responses in the API spec.
2. Examine existing node descriptions. If the node already exists, utilize it without creating a duplicate in your
response.
3. Do not create any alias nodes in your response that already exist in "Existing Nodes".
4. Only append new nodes found in the API spec.
5. Ensure CamelCase naming convention for new nodes.
6. Do not omit any nodes.
7. It's crucial to ensure that the descriptions for each parameter highlight their intrinsic properties rather than
just their relation to the resources.
Response Format:
- Nodes: [Python dictionary listing all significant nodes in the Open API spec response payload with their brief
descriptions. Do not omit any node.]
- Relationships: [Python List containing all meaningful relationships in the Open API specs. Use dictionaries with
'source', 'target', and 'relationship' keys.]
- Location: {Python dictionary mapping nodes to their locations in the JSON structure of the API's successful
response schema. Use '[*]' if the location is a list. Separate location keys with '.'}
- Attribute: {Python dictionary associating nodes with their attribute keys.}
Failure to include any node, relationship, or location will result in missing connections in the data.
"""
VPC_COST_OPTIMIZATION_TEMPLATE = """
The tool response has returned JSON data containing information about:
1) Cloud service cost: per-service cost details of the selected cloud account
2) Cost trends: actions taken on the recommendations and the cost saved from those recommendations
<tool_response>
<cost-by-services-json>
Here is the cloud service cost json:
{service_cost}
</cost-by-services-json>
<cost-trends>
Here are the cost trends from last 12 months:
{cost_trend_payload}
</cost-trends>
</tool_response>
Now take a moment to understand the user's question and extract the required answer from the returned response."""
SOFTLAYER_INFRA_ASSESSMENT_PROMPT = """
The tool response has returned JSON data containing information about:
1) IBM Softlayer Cloud Resources assessment data: includes high-level assessments of classic infrastructure and identifies potential cost optimization opportunities for migration to IBM Cloud VPC.
Provided IBM Softlayer Infrastructure Data:
<ibm-softlayer-cloud-resources-assessment>
The report should be based on the following IBM Softlayer Cloud Resources assessment data:
{ibm_softlayer_cloud_payload}
</ibm-softlayer-cloud-resources-assessment>
Using the user query '{query}', analyze the JSON data above and generate a response accordingly.
Important: All necessary data has already been provided, so please proceed with analyzing and generating the report without requesting further details.
"""
|
CloudWhisperCustomBot | app/web/common/utils.py | import aiohttp
import asyncio
import httpx
import json
import requests
import time
from fastapi import HTTPException
from loguru import logger
from sqlalchemy import select, update
from sqlalchemy.orm import selectinload
from app import models
from app.api_discovery.utils import update_profile_with_vpcplus_api_key
from app.core.config import settings
from app.web import activity_tracking
from app.web.common import db_deps
from app.models.activity_tracking import ActivityTracking
async def api_key_msg_event_generator(api_key_msg, chat_id):
from app.models import Message, Chat
lines = api_key_msg.split("\n")
assistant_message = ""
for line in lines:
chunk = line
assistant_message += chunk + "\n"
yield {"event": "message", "data": line}
async with db_deps.get_db_session_async_context() as db_session:
chat = (await db_session.scalars(select(Chat).filter(Chat.id == chat_id))).one_or_none()
chat_message = Message(msg_type=Message.TYPE_ASSISTANT, content=assistant_message, msg_category=Message.TYPE_QNA)
chat_message.chat = chat
db_session.add(chat_message)
await db_session.commit()
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
async def user_msg_event_generator(response_for_user, chat_id):
from app.models import Message, Chat
lines = response_for_user.split("\n")
assistant_message = ""
for line in lines:
chunk = line.lstrip()
assistant_message += chunk + "\n"
yield {"event": "message", "data": line}
async with db_deps.get_db_session_async_context() as db_client:
chat = (await db_client.scalars(select(Chat).filter(Chat.id == chat_id))).one_or_none()
chat_message = Message(msg_type=Message.TYPE_ASSISTANT, content=assistant_message, msg_category=Message.TYPE_QNA)
chat_message.chat = chat
db_client.add(chat_message)
await db_client.commit()
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
async def fetch_and_update_vpcplus_api_key(authorization, user_id):
headers = {"Authorization": f"Bearer {authorization.credentials}"}
try:
async with httpx.AsyncClient(timeout=60.0) as http_client:
resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/api_key", headers=headers)
if resp.status_code == 200:
api_key_json = resp.json()
api_key = api_key_json.get("key")
api_key_name = api_key_json.get("name")
api_key_expiry = api_key_json.get("expires_at")
async with db_deps.get_db_session_async_context() as session:
await update_profile_with_vpcplus_api_key(
profile_id=user_id,
api_key=api_key, api_key_name=api_key_name,
api_key_expiry=api_key_expiry if api_key_expiry else None
)
else:
logger.error(f"Failed to fetch API key: {resp.status_code} {resp.text}")
    except httpx.TimeoutException as e:
        logger.error(f"Request timed out: {str(e)}")
    except httpx.HTTPStatusError as e:
        logger.error(f"HTTP error occurred: {str(e)}")
    except httpx.RequestError as e:
        logger.error(f"Request error occurred: {str(e)}")
except Exception as e:
logger.error(f"An unexpected error occurred: {str(e)}")
async def update_appearance_in_db(profile, appearance):
async with db_deps.get_db_session_async_context() as db_session:
query = select(models.Profile).options(selectinload(models.Profile.chats)).filter_by(id=profile['uuid'])
result = await db_session.execute(query)
profile_setting = result.scalars().first()
if profile_setting is None:
profile_setting = models.Profile(appearance=appearance, user_id=profile["id"], name=profile["name"],
email=profile["email"])
profile_setting.profile_id = profile['uuid']
db_session.add(profile_setting)
if appearance is not None:
profile_setting.appearance = appearance
await db_session.commit()
return profile_setting
async def update_activity_tracking(activity_response, chat_id, action_id, user_id=None):
    logger.info(f"Activity Response: {activity_response}")
# Parse the activity_response string into a dictionary
try:
activity_response = json.loads(activity_response)
except json.JSONDecodeError as e:
logger.error(f"Failed to parse activity_response: {e}")
return
try:
async with db_deps.get_db_session_async_context() as db_session:
result = await db_session.scalars(select(models.Profile).filter(models.Profile.user_id == user_id))
profile = result.one_or_none()
try:
if activity_response.get("reporting_type") == "WorkflowsWorkspace":
resource_type = activity_response.get("fe_request_data").get("resource_type")
resource_name = activity_response.get("fe_request_data").get("backup_name")
status = activity_response["status"]
activity_type = activity_response.get("workspace_type")
if activity_response.get("workspace_type") == "TYPE_RESTORE":
activity_type = "RESTORE"
activity_tracking = ActivityTracking(
workflow_id=activity_response["id"],
user_id=user_id,
resource_name=resource_name,
fe_request_data=activity_response.get("fe_request_data"),
resource_type=resource_type,
activity_type=activity_type,
created_at=activity_response["created_at"],
summary=f"Restoration of {resource_name} is {status}",
email=profile.email,
status=status,
started_at=activity_response.get("started_at"),
completed_at=activity_response.get("completed_at"),
action_id=action_id,
chat_id=chat_id
)
                else:
                    activity_type = activity_response["workflow_nature"]
                    resource_type = activity_response["resource_type"]
                    if resource_type == "IBMKubernetesCluster" and activity_type == "CREATE":
                        activity_type = "RESTORE"
activity_tracking = ActivityTracking(
workflow_id=activity_response["id"],
user_id=user_id,
resource_name=activity_response["workflow_name"],
fe_request_data=activity_response.get("fe_request_data"),
resource_type=activity_response["resource_type"],
activity_type=activity_type,
created_at=activity_response["created_at"],
summary=activity_response["summary"],
email=activity_response.get("email") or profile.email,
status=activity_response["status"],
started_at=activity_response.get("started_at"),
completed_at=activity_response.get("completed_at"),
action_id=action_id,
chat_id=chat_id
)
except KeyError as e:
logger.error(f"Missing key in activity_response: {e}")
return
except Exception as e:
logger.error(f"Error while constructing activity_tracking: {e}")
return
activity_tracking.profile = profile
db_session.add(activity_tracking)
await db_session.commit()
logger.info("Activity Tracked Successfully")
except Exception as e:
logger.error(f"Unexpected error in update_activity_tracking: {e}")
async def update_activity_status(activity_response, activity_id):
from app.models import ActivityTracking
logger.info(f"Activity Response: {activity_response}")
async with db_deps.get_db_session_async_context() as db_session:
activity = (await db_session.scalars(
select(models.ActivityTracking)
.filter(models.ActivityTracking.id == activity_id)
)).one_or_none()
if not activity:
return
status = activity_response["status"]
activity_query = await db_session.execute(
update(ActivityTracking)
.where(ActivityTracking.id == activity_id)
.values(status=status, completed_at=activity_response.get('completed_at'),
summary=activity_response.get("summary") or f"Restoration of {activity.resource_name} is {status}")
.returning(ActivityTracking)
)
updated_activity = activity_query.scalars().first()
await db_session.commit()
return updated_activity
# Custom context manager for making requests and handling errors
class HttpRequestHandler:
def __init__(self, session):
self.session = session
async def post(self, url, headers):
try:
async with self.session.post(url, headers=headers) as response:
if response.status != 202:
raise HTTPException(status_code=response.status, detail=f"Error: {await response.text()}")
return await response.json()
except aiohttp.ClientError as e:
raise HTTPException(status_code=500, detail=f"HTTP request failed: {str(e)}")
async def get(self, url, headers):
try:
async with self.session.get(url, headers=headers) as response:
if response.status != 200:
raise HTTPException(status_code=response.status, detail=f"Error: {await response.text()}")
return await response.json()
except aiohttp.ClientError as e:
raise HTTPException(status_code=500, detail=f"HTTP request failed: {str(e)}")
# Async function to start the workflow
async def start_workflow(headers, cloud_account_id, session):
COST_OPTIMIZATION_REPORT_URL = settings.web.AUTH_LINK + f"/v1/softlayer/recommendations/{cloud_account_id}"
# Make POST request to start the workflow
handler = HttpRequestHandler(session)
response = await handler.post(COST_OPTIMIZATION_REPORT_URL, headers)
# Extract and return workflow ID
workflow_id = response.get("id")
if not workflow_id:
raise HTTPException(status_code=404, detail="Workflow ID not found in response.")
return workflow_id
# Async function to poll workflow status with retry mechanism and timeout
async def poll_workflow_status(workflow_id, headers, session, max_poll_time=60, polling_interval=5, max_retries=5):
WORKFLOW_STATUS_URL = settings.web.AUTH_LINK + f"/v1/ibm/workflows/{workflow_id}"
handler = HttpRequestHandler(session)
retries = 0
while retries <= max_retries:
try:
# Poll workflow status
workflow_data = await handler.get(WORKFLOW_STATUS_URL, headers)
# Check if workflow is completed
if workflow_data.get("status") == "COMPLETED_SUCCESSFULLY":
return workflow_data.get("resource_json", {})
except HTTPException as e:
# Retry logic with exponential backoff
retries += 1
if retries > max_retries:
raise HTTPException(status_code=500, detail=f"Failed after {max_retries} retries: {str(e)}")
await asyncio.sleep(2 ** retries) # Exponential backoff
# Wait for polling interval
await asyncio.sleep(polling_interval)
# If the polling timed out, return in-progress status
return {"status": "in progress", "message": "Workflow is still running. Check back later.",
"workflow_id": workflow_id}
# Main function to execute workflow and retrieve cost response
async def get_softlayer_cloud_cost_response(headers, cloud_account_id, max_poll_time=60, polling_interval=5,
max_retries=5):
async with aiohttp.ClientSession() as session:
try:
# Step 1: Start the workflow
workflow_id = await start_workflow(headers, cloud_account_id, session)
# Step 2: Poll for workflow completion
workflow_result = await asyncio.wait_for(
poll_workflow_status(workflow_id, headers, session, max_poll_time=max_poll_time,
polling_interval=polling_interval, max_retries=max_retries),
timeout=max_poll_time
)
# Return workflow result
return workflow_result
except asyncio.TimeoutError:
return {"status": "in progress", "message": "Workflow polling timed out.", "workflow_id": workflow_id}
except HTTPException as e:
raise HTTPException(status_code=500, detail=f"Error during workflow execution: {str(e)}")
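# Usage sketch (hypothetical caller; header construction assumed done upstream):
# async def _fetch_cost_report(bearer_token, account_id):
#     headers = {"Authorization": bearer_token}
#     result = await get_softlayer_cloud_cost_response(headers, account_id, max_poll_time=120)
#     if result.get("status") == "in progress":
#         ...  # surface result["workflow_id"] to the client and poll again later
#     return result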
|
CloudWhisperCustomBot | app/web/common/chats_websockets_utils.py | from types import AsyncGeneratorType
import aiohttp
import types
import asyncio
import httpx
import json
import re
from datetime import datetime
from fastapi import HTTPException
from fastapi.security import HTTPAuthorizationCredentials
from loguru import logger
from sqlalchemy import asc
from sqlalchemy.future import select
from sqlalchemy.orm import selectinload
from sse_starlette import EventSourceResponse
from app.core.config import settings
from app.web.common import db_deps
from app.web.common.cloud_setup_instruction_messages import (IBM_CLOUD_ACCOUNT_MESSAGE, GENERAL_CLOUD_ACCOUNT_MESSAGE,
IBM_CLASSIC_CLOUD_ACCOUNT_MESSAGE)
from app.web.common.templates import (ROUTE_TEMPLATE, NARROW_DOWN_INTENT, NARROW_DOWN_MIGRATION_INTENT,
CONFIDENCE_SCORING_BOT)
from app.web.common.utils import api_key_msg_event_generator
from app.whisper.consts import WHISPER_USER_ROLE, WHISPER_ASSISTANT_ROLE
from app.whisper.llms.anthropic import AnthropicLLM
from app.whisper.utils.action_engine import ActionPhaseClaude, ComplexActionPhaseClaude
from app.whisper.utils.migration_action_engine import MigrationActionPhaseClaude
async def process_routing(chat_id):
    """
    Determines from the chat history which tool the latest user query should be routed to.
    Parameters:
    chat_id (str): ID of the chat whose message history (previous messages plus the user's
    latest query) is loaded from the database as dictionaries of message type and content,
    e.g. [{'type': 'Human', 'text': 'content'}, {'type': 'Assistant', 'text': 'content'}]
    Returns:
    tuple: (standalone_query, tool) where tool is one of: Action, QnA_or_Schedule_a_call,
    InformationRetrieval, ClassicMigration. Returns (None, None) on failure.
    """
from app.models import Message
try:
async with db_deps.get_db_session_async_context() as db_session:
messages_obj = (await db_session.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-6:] # limit chat history to last 5 messages
query = chat_json_last_5[-1]['text'] # query that user just asked
chat_history_str = ''
for chat_ in chat_json_last_5[:-1]: # don't add user latest query in chat history
chat_history_str += f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n"
client = AnthropicLLM()
prompt = ROUTE_TEMPLATE.format(query=query, chat_history=chat_history_str.strip())
response = ''
feedback_sent = False
for attempt in range(2):
if not feedback_sent:
client.add_message(role=WHISPER_USER_ROLE, content=prompt)
try:
async for text in client.process_stream():
response += text
if "complete_user_query:" in response:
if "Tool: Action" in response:
user_query = response.split("Tool: Action")[0]
user_query = user_query.split("complete_user_query:")[1].strip()
logger.info(response)
return user_query, "Action"
elif "Tool: QnA_or_Schedule_a_call" in response:
user_query = response.split("Tool: QnA_or_Schedule_a_call")[0]
user_query = user_query.split("complete_user_query:")[1].strip()
logger.info(response)
return user_query, "QnA_or_Schedule_a_call"
elif "Tool: DataExplorer" in response:
user_query = response.split("Tool: DataExplorer")[0]
user_query = user_query.split("complete_user_query:")[1].strip()
logger.info(response)
return user_query, "InformationRetrieval"
elif "Tool: ClassicMigration" in response:
user_query = response.split("Tool: ClassicMigration")[0]
user_query = user_query.split("complete_user_query:")[1].strip()
logger.info(response)
return user_query, "ClassicMigration"
if attempt == 0:
logger.info("Retrying with feedback...")
feedback = "Internal feedback: The response you generated seems to be in an incorrect format. Please review the response and ensure it adheres to the expected format, such as 'Tool: Action', 'Tool: QnA', 'Tool: ClassicMigration' or 'Tool: DataExplorer'. Additionally, the response should contain 'complete_user_query' with the full query entered by the user."
client.add_message(role=WHISPER_ASSISTANT_ROLE, content=feedback)
feedback_sent = True
continue
else:
logger.warning(f"Retry failed. Defaulting to QnA. Response: {response}")
return query, "QnA_or_Schedule_a_call"
except Exception as e:
logger.error(f"Unexpected error during response processing: {e}")
return None, None
except Exception as e:
logger.error(f"An error occurred while processing routing: {e}")
return None, None
async def execute_qna(user_name, question, chat_id):
from app.whisper.utils.qna_bot.base import QnABot
from app.models import Message
error = None
import traceback
try:
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
chat = messages_obj[0].chat
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
            chat_json_last_5 = chat_json[-7:-1]  # previous messages only (latest user query excluded)
retrieval_client = QnABot(chat_history=chat_json_last_5, user_name=user_name)
response = await retrieval_client.start(question)
response = format_response(response)
if not response.strip():
raise ValueError("Received an empty response from the assistant")
yield {"event": "message", "data": response}
chat_message = Message(msg_type=Message.TYPE_ASSISTANT, content=response, msg_category=Message.TYPE_QNA)
chat_message.chat = chat
db_client.add(chat_message)
await db_client.commit()
logger.info(response)
except ValueError as e:
logger.error(f"An error occurred in get_information_from_db: {str(e)}")
error = {"event": "error", "data": json.dumps({"detail": str(e)})}
except Exception as e:
logger.info(e)
logger.error(f"An error occurred while retrieving information: {traceback.format_exc()}")
error = {"event": "error", "data": json.dumps({"detail": "Internal server error"})}
finally:
if error:
yield error
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
yield {"event": "close"}
async def get_base_bot_response(payload):
headers = {"Content-Type": "application/json", "X-API-KEY": settings.base_bot.X_API_KEY}
timeout = aiohttp.ClientTimeout(total=50)
async with aiohttp.ClientSession(timeout=timeout) as session:
try:
async with session.post(settings.base_bot.BASE_BOT_URL, headers=headers, json=payload) as response:
if response.status != 200:
response_text = await response.text()
logger.error(f"Base Bot API Error: status code '{response.status}', response: {response_text}")
raise HTTPException(status_code=response.status, detail="Error contacting QnA API")
async for chunk in response.content.iter_any():
yield chunk.decode('utf-8')
except asyncio.TimeoutError:
logger.error("Timeout error while contacting Base Bot API")
raise HTTPException(status_code=504, detail="Timeout contacting QnA API")
async def narrow_down_intent(chat_id, user_dict, standalone_query, action_id=None, cloud_id=None):
from app.models import Message, Action
logger.info(f"Narrow down intent for chat_id: {chat_id}")
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
chat = messages_obj[0].chat
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-6:] # limit chat history to last 5 messages
query = chat_json_last_5[-1]['text'] # query that user just asked
chat_history_str = ''
for chat_ in chat_json_last_5[:-1]: # don't add user latest query in chat history
chat_history_str = (
chat_history_str + f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n")
logger.debug("<<<<<<<<<<<<<<<<<<<<<Chat History>>>>>>>>>>>>>>>>>>>>")
logger.debug(chat_history_str)
logger.debug("<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>")
start_time = datetime.now()
llm_client = AnthropicLLM()
# logger.info(NARROW_DOWN_INTENT.format(chat_history=chat_history_str.strip(), query=query))
logger.info(chat_json)
if len(chat_json) == 1:
response = "[Craft a precise and professional response to the user as a support agent from Wanclouds. Greet user as well as its users first message]"
llm_client.add_message(role=WHISPER_USER_ROLE,
content=NARROW_DOWN_INTENT.format(chat_history=chat_history_str.strip(), query=query,
response=response))
else:
response = "[Craft a precise and professional response to the user as a support agent from Wanclouds. Don't add greetings. Follow previous flow of conversation from chat history]"
llm_client.add_message(role=WHISPER_USER_ROLE,
content=NARROW_DOWN_INTENT.format(chat_history=chat_history_str.strip(), query=query,
response=response))
logger.info(llm_client.messages)
complete_response = ''
action = ''
stream_started = False
try:
async for chunk in llm_client.process_stream():
complete_response += chunk
if 'Task Finished: True' in complete_response:
action = complete_response.split('Intent Narrowed:')[1]
action = action.split('Task Analysis:')[0]
action = action.strip('\n').strip() if action.startswith('\n') else action
pattern = r'^\d+'
match = re.match(pattern, action)
if match:
action = action[match.end():]
logger.info(f"ACTION ---->{action}")
logger.info(f"{complete_response}")
break
if "Response:" in complete_response and not stream_started:
stream_started = True
buffer = complete_response.split("Response:", 1)[1] # Keep only the content after "Response:"
if buffer:
yield {"event": "message", "data": buffer}
continue
if stream_started:
if chunk.startswith('\n'):
yield {"event": "message", "data": "\n"}
if chunk.startswith('\n\n'):
yield {"event": "message", "data": "\n\n"}
yield {"event": "message", "data": chunk}
if chunk.endswith('\n'):
yield {"event": "message", "data": "\n"}
if chunk.endswith('\n\n'):
yield {"event": "message", "data": "\n\n"}
except Exception as e:
logger.error(f"Error during intent narrowing: {e}")
yield {"event": "error", "data": json.dumps({"detail": "Error during intent narrowing"})}
return
logger.info(complete_response)
if not complete_response.strip():
yield {"event": "error", "data": json.dumps({"detail": "Received an empty response from the assistant"})}
return
if action:
chat_history_str = ''
for chat_ in chat_json_last_5[:]: # don't add user latest query in chat history
chat_history_str = (
chat_history_str + f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n")
try:
confidence_scoring_bot = AnthropicLLM()
confidence_scoring_bot.add_message(role=WHISPER_USER_ROLE,
content=CONFIDENCE_SCORING_BOT.format(
chat_history=chat_history_str.strip(),
narrowed_intent=action))
confidence_response = confidence_scoring_bot.process()
logger.info(confidence_response)
confirmation, recommendation = False, None
if 'Overall Confidence: High' in confidence_response:
confirmation = False
else:
confirmation = True
recommendation = confidence_response.split('Recommendation:')[1]
except Exception as e:
logger.error(f"Error during intent narrowing: {e}")
yield {"event": "error", "data": json.dumps({"detail": "Error during intent narrowing"})}
return
action = action.lstrip("- ") if action.startswith("- ") else action
action_tool = similarity_search_api_desc(action)
messages_obj_metadata = json.dumps({
'searched_intent': action_tool,
'task_finished': False,
'stage': 1,
'history': []
})
if confirmation:
chat.confirmation_stage = True
llm_client.add_message(role='assistant', content=complete_response)
if len(chat_json) == 1:
                llm_client.add_message(role='user',
                                       content=f'Internal Feedback: Got this feedback from the confidence scoring bot, which analyzed the conversation history and the list of intents: \n \"{recommendation}\" \n Start with a polite greeting. Then, without mentioning the word intent or any internal processes, please ask the user to confirm their specific request or goal. Ensure you only seek confirmation of what they want to do, and do not collect any additional requirements or information. Remember, the user is not aware of our internal workings, so keep the language user-friendly and focused on their needs.')
            else:
                llm_client.add_message(role='user',
                                       content=f'Internal Feedback: Got this feedback from the confidence scoring bot, which analyzed the conversation history and the list of intents: \n \"{recommendation}\" \nNow, without mentioning the word intent or any internal processes, please ask the user to confirm their specific request or goal. Ensure you only seek confirmation of what they want to do, and do not collect any additional requirements or information. Remember, the user is not aware of our internal workings, so keep the language user-friendly and focused on their needs.')
llm_client.add_message(role='assistant', content='Response:')
complete_response = llm_client.process()
if 'Response:' in complete_response:
yield {"event": "message", "data": complete_response.split('Response:', 1)[1]}
else:
yield {"event": "message", "data": complete_response}
else:
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
messages_obj[-1].json_metadata = messages_obj_metadata
metadata = json.loads(messages_obj[-1].json_metadata)
metadata['initial_query'] = standalone_query
logger.info(metadata["searched_intent"][0])
action_obj = Action(name=metadata["searched_intent"][0], metadata=json.dumps(metadata))
action_id = action_obj.id
dummy_message_content = "This action is performed by another bot. You can start with a fresh conversation or continue with a new context."
dummy_message = Message(
msg_type=Message.TYPE_ASSISTANT,
content=dummy_message_content,
msg_category=Message.TYPE_ACTION,
is_visible=False,
)
dummy_message.chat = messages_obj[0].chat
db_client.add(dummy_message)
messages_obj[-1].action_id = action_obj.id
db_client.add(action_obj)
messages_obj[-1].msg_category = Message.TYPE_ACTION
await db_client.commit()
logger.info("hereeeee1")
yield {"event": "action", "data": json.dumps(action_obj.to_reference_json())}
logger.info(standalone_query)
stream = execute_stage_1(initial_query=standalone_query, user_dict=user_dict,
chat_id=chat_id, action_id=action_id, cloud_id=cloud_id)
async for chunk in stream:
yield chunk
logger.info(chunk)
return
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
yield {"event": "close"}
end_time = datetime.now()
if 'Response:' in complete_response:
response = complete_response.split('Response:')[1]
else:
response = complete_response
async with db_deps.get_db_session_async_context() as db_client:
chat_message = Message(msg_type=Message.TYPE_ASSISTANT, content=response, msg_category=Message.TYPE_QNA)
chat_message.chat = chat
db_client.add(chat_message)
await db_client.commit()
logger.debug("<<<<<<<<<<<<<<<<<<<<<Intent Phase Response>>>>>>>>>>>>>>>>>>>>")
logger.debug(complete_response)
logger.debug("<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>")
logger.info(f"Total Seconds=>{(end_time - start_time).total_seconds()}")
async def narrow_down_migration_intent(chat_id, user_dict, standalone_query, action_id=None):
from app.models import Message
logger.info(chat_id)
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
logger.info(messages_obj)
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-6:] # limit chat history to last 5 messages
query = chat_json_last_5[-1]['text'] # query that user just asked
chat_history_str = ''
for chat_ in chat_json_last_5[:-1]: # don't add user latest query in chat history
chat_history_str = (
chat_history_str + f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n")
logger.info("<<<<<<<<<<<<<<<<<<<<<Chat History>>>>>>>>>>>>>>>>>>>>")
logger.info(chat_history_str)
logger.info("<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>")
llm_client = AnthropicLLM()
llm_client.add_message(role=WHISPER_USER_ROLE,
content=NARROW_DOWN_MIGRATION_INTENT.format(chat_history=chat_history_str.strip(),
query=query))
complete_response = ''
response_content = ''
action = ''
complete_response = llm_client.process()
if 'Task Finished: True' in complete_response:
action = complete_response.split('Intent Narrowed:')[1]
action = action.split('Task Analysis:')[0]
action = action.strip('\n').strip() if action.startswith('\n') else action
pattern = r'^\d+'
match = re.match(pattern, action)
if match:
action = action[match.end():]
action = action.lstrip("- ") if action.startswith("- ") else action
else: # if "Response:" in complete_response:
response_content = complete_response.split("Response:", 1)[1] # Keep only the content after "Response:"
return action, response_content, complete_response
async def confirmation_bot(chat_id):
from app.models import Message
logger.info(chat_id)
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-6:] # limit chat history to last 5 messages
query = chat_json_last_5[-1]['text'] # query that user just asked
chat_history_str = ''
for chat_ in chat_json_last_5[:-1]: # don't add user latest query in chat history
chat_history_str = (
chat_history_str + f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n")
logger.debug("<<<<<<<<<<<<<<<<<<<<<Chat History>>>>>>>>>>>>>>>>>>>>")
logger.debug(chat_history_str)
logger.debug("<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>")
llm_client = AnthropicLLM()
if len(chat_json) == 1:
llm_client.add_message(role='user',
content=f'Internal Feedback: Add greetings, start with a polite greeting. Then, without mentioning the word intent or any internal processes, please ask the user to confirm their specific request or goal. Ensure you only seek confirmation of what they want to do, and do not collect any additional requirements or information. Remember, the user is not aware of our internal workings, so keep the language user-friendly and focused on their needs.')
else:
llm_client.add_message(role='user',
content=f'Internal Feedback: Now Without mentioning the word intent or any internal processes, please ask the user to confirm their specific request or goal. Ensure you only seek confirmation of what they want to do, and do not collect any additional requirements or information. Remember, the user is not aware of our internal workings, so keep the language user-friendly and focused on their needs.')
confirmation_tool = {
"name": "confirmation_tool",
"description": "This tool reviews users latest reply to confirmation of the values selected by a tool that has recently completed the payload. If the user confirms or agrees with the choices in the summary displayed, the Confirmation is true, but if user doesn't approve then summary then Confirmation is false."
"else False",
"input_schema": {
"type": "object",
"properties": {
"Confirmation": {
"type": "boolean",
"description": "Confirmation flag that is only True when user confirms the details and is ready to move forward. Analyze user last response. Don't get hallucinated by history"
},
"standalone_query":
{
"type": "string",
"description": "Analyze user latest query and chat history to create standalone query in tone like user is saying"
}
},
"required": [
"Confirmation",
"standalone_query"
]
}
}
chat_response = llm_client.process(
system="You are expert whose job is to analyze user current reply to confirmation and decide if user wants to proceed or not",
tools=[confirmation_tool],
force_tool=True,
tool_name="confirmation_tool")
logger.info("*" * 20)
logger.info(chat_response)
logger.info("*" * 20)
confirmation = chat_response['content'][-1]['input'].get("Confirmation")
standalone = chat_response['content'][-1]['input'].get('standalone_query')
if "Confirmation" not in chat_response['content'][-1]['input']:
llm_client.add_message(role=WHISPER_ASSISTANT_ROLE, content=chat_response['content'])
llm_client.add_message(role=WHISPER_USER_ROLE, content=[{
"type": "tool_result",
"tool_use_id": chat_response[-1]['id'],
"content": 'Please generate confirmation field',
"is_error": True
}])
chat_response = llm_client.process(
system="You are expert whose job is to analyze user current reply to confirmation and decide if user wants to proceed or not. Think step in step in <thinking> tags",
tools=[confirmation_tool],
force_tool=True,
tool_name="confirmation_tool")
confirmation = chat_response['content'][-1]['input']["Confirmation"]
standalone = chat_response['content'][-1]['input'].get('standalone_query') if chat_response['content'][-1][
'input'].get('standalone_query') else standalone
if "standalone_query" not in chat_response['content'][-1]['input']:
llm_client.add_message(role=WHISPER_ASSISTANT_ROLE, content=chat_response['content'])
llm_client.add_message(role=WHISPER_USER_ROLE, content=[{
"type": "tool_result",
"tool_use_id": chat_response[-1]['id'],
"content": 'Please generate confirmation field',
"is_error": True
}])
chat_response = llm_client.process(tools=[confirmation_tool],
force_tool=True,
tool_name="confirmation_tool")
confirmation = chat_response['content'][-1]['input'].get("Confirmation") if chat_response['content'][-1][
'input'].get("Confirmation") else confirmation
standalone = chat_response['content'][-1]['input'].get('standalone_query') if chat_response['content'][-1][
'input'].get('standalone_query') else standalone
logger.info(confirmation)
if confirmation:
return True, standalone
return False, ''
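# For reference, the forced tool call above is expected to come back in Anthropic's
# tool_use shape, roughly (abbreviated and illustrative only):
# {"content": [{"type": "tool_use", "id": "toolu_...", "name": "confirmation_tool",
#               "input": {"Confirmation": true, "standalone_query": "..."}}]}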
def similarity_search_api_desc(query: str, k=1):
from app.main import app
retrieved_docs = app.state.vector_store_index.similarity_search(query, k=k)
method = [retrieved_docs[i].page_content for i in range(k)]
metadata = [retrieved_docs[i].metadata for i in range(k)]
return [method[0], metadata[0]]
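# Example return shape for k=1 (illustrative): [best_match_page_content, best_match_metadata],
# i.e. the stored intent text plus its metadata dict from the vector index.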
# This is where action begins
async def execute_stage_1(initial_query, chat_id, user_dict, action_id, cloud_type=None, cloud_id=None):
"""
Executes the first stage of an action task based on the user's initial query.
This function is called when a bot finalizes user intent, such as "create a VPC backup" or "delete a COS instance",
which was initially queried from a vector database.
Parameters:
- initial_query (str): The initial query or command from the user.
- db_client (DatabaseClient): The database client used to interact with the database.
- chat_id (str): The ID of the chat session.
- bearer (str): The bearer token for authentication.
- action_id (str): The ID of the action to be executed.
Steps:
    1. Retrieve the action and its associated message history for the given chat session from the database.
2. Parse the metadata from the conversation history to extract the searched intent and message history.
3. Construct a JSON object representing the chat without including messages.
4. Record the start time for execution timing purposes.
    5. Initialize the appropriate action bot (ActionPhaseClaude, ComplexActionPhaseClaude, or MigrationActionPhaseClaude) with the searched intent and message history.
6. Execute the intent using `intent_execution_bot.start` with the initial query.
7. Parse the bot's response to determine if the task has been finished.
8. Update the metadata based on the task's completion status. If finished, reset the stage, history, and searched intent.
9. Save the updated metadata back to the database.
10. Record the end time and log the total execution duration.
11. Create a new message request object with the bot's response and add it to the chat.
12. Yield the bot's response and chat information as events for the client.
Yields:
- A message event with the bot's response.
- A chat_info event with the updated chat JSON.
- A close event indicating the end of the process.
"""
logger.info("hereeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee")
from app.models import Message, Action, Chat, ActivityTracking
try:
async with db_deps.get_db_session_async_context() as db_session:
result = await db_session.scalars(
select(Action).filter(Action.id == action_id).options(selectinload(Action.messages)))
action = result.unique().one_or_none()
if not action:
raise ValueError("Action not found")
logger.info(action)
action_json = [message.to_reference_json() for message in action.messages[:-1]]
logger.info(action_json)
metadata_dict = json.loads(action.json_metadata)
if not metadata_dict:
raise ValueError("Metadata is empty or invalid")
logger.info(metadata_dict)
logger.info(metadata_dict.get('history'))
searched_intent = metadata_dict["searched_intent"]
if not searched_intent:
raise ValueError("Searched intent not found in metadata")
yield {"event": "action", "data": json.dumps(action.to_reference_json())}
logger.info(user_dict)
complex_bot_action = isinstance(searched_intent[-1]['method']['tool'], list)
if complex_bot_action:
intent_execution_bot = ComplexActionPhaseClaude(intent=searched_intent,
chat_history=metadata_dict.get('history'),
user_id=user_dict['id'],
bearer=user_dict['bearer'],
metadata=metadata_dict,
cloud_id=cloud_id)
elif searched_intent[-1]['method']['tool']['name'] == 'post_migration_request':
intent_execution_bot = MigrationActionPhaseClaude(intent=searched_intent,
chat_history=metadata_dict.get('history'),
user_dict=user_dict, bearer=user_dict['bearer'],
metadata_dict=metadata_dict, action_id=action_id,
cloud_id=cloud_id)
else:
intent_execution_bot = ActionPhaseClaude(intent=searched_intent,
chat_history=metadata_dict.get('history'),
user_id=user_dict['id'],
bearer=user_dict['bearer'],
metadata_dict=metadata_dict,
cloud_id=cloud_id
)
start_time = datetime.now()
response_obj = await intent_execution_bot.start(initial_query, chat_id=chat_id, action_id=action_id)
if complex_bot_action:
metadata_dict.update(intent_execution_bot.get_metadata())
complete_response = ""
if isinstance(response_obj, AsyncGeneratorType):
async for chunk in response_obj:
try:
data = "\n" if not chunk else chunk
yield {"event": "message", "data": data}
complete_response += data
except Exception as e:
logger.error(f"Error processing chunk: {str(e)}")
continue
else:
# If response is a string
response_obj1 = response_obj.replace('```','')
logger.info('printing type of response_obj')
logger.info(type(response_obj1))
marker = "Output Formatted Result:"
if marker in response_obj1:
response_obj1 = response_obj1.split(marker, 1)[-1].strip()
yield {"event": "message", "data": response_obj1}
complete_response = response_obj1
end_time = datetime.now()
logger.info(f"Total Seconds=>{(end_time - start_time).total_seconds()}")
#TODO: Handle the formatting incase of error
# intent_execution_bot.base_llm.messages[-1]["content"]= complete_response
logger.info("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
metadata_dict['history'] = intent_execution_bot.base_llm.messages
logger.info(metadata_dict['history'])
async with db_deps.get_db_session_async_context() as db_client:
action.json_metadata = json.dumps(metadata_dict)
await db_client.commit()
            action_message = Message(msg_type=Message.TYPE_ASSISTANT,
                                     content=complete_response.replace('Response:', ''),
                                     msg_category=Message.TYPE_ACTION)
            action_message.action = action
            db_client.add(action_message)
            await db_client.commit()
except ValueError as e:
logger.error(f"An error occurred in execute_stage_1: {str(e)}")
yield {"event": "error", "data": json.dumps({"detail": str(e)})}
finally:
async with db_deps.get_db_session_async_context() as db_client:
activities = (await db_client.scalars(select(ActivityTracking).filter(
ActivityTracking.action_id == action_id))).all()
if activities:
for activity in activities:
                    if not activity.is_polled:
yield {"event": "task", "data": json.dumps(await activity.to_event_json(db_client))}
activity.is_polled = True
yield {"event": "action", "data": json.dumps(action.to_reference_json())}
async with db_deps.get_db_session_async_context() as db_client:
chat = (await db_client.scalars(select(Chat).filter(Chat.id == chat_id))).one_or_none()
if chat:
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
else:
logger.warning(f"Chat with ID {chat_id} not found.")
yield {"event": "close"}
async def get_information_from_db(user_id, question, chat_id, cloud_id=None,
cloud_type=None):
from app.whisper.utils.information_retrieval_engine.base import RetrievalPhaseClaude
from app.models import Message
error = None
import traceback
try:
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
chat = messages_obj[0].chat
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
            chat_json_last_5 = chat_json[-7:-1]  # previous messages only (latest user query excluded)
retrieval_client = RetrievalPhaseClaude(chat_history=chat_json_last_5, user_id=user_id['id'],
llm_chat_history=chat.json_metadata, bearer=user_id['bearer'],
chat_id=chat_id, cloud_id=cloud_id, cloud_type=cloud_type
)
response_obj = await retrieval_client.start(question)
complete_response = ""
logger.info('printing response obj')
logger.info(response_obj)
if isinstance(response_obj, types.AsyncGeneratorType):
logger.info('in the async generator')
async for chunk in response_obj:
try:
# data = "" if not chunk else chunk
# data.replace('\n', '<br>')
logger.info(chunk)
yield {"event": "message", "data": chunk}
complete_response += chunk
except Exception as e:
logger.error(f"Error processing chunk: {str(e)}")
continue
else:
# If response is a string
response_obj1 = response_obj.replace('```','')
logger.info('printing type of response_obj')
logger.info(type(response_obj1))
marker = "Output Formatted Result:"
if marker in response_obj1:
response_obj1 = response_obj1.split(marker, 1)[-1].strip()
yield {"event": "message", "data": response_obj1}
complete_response = response_obj1
logger.info("*******************************FORMATTED CONTENT**************")
logger.info(complete_response)
logger.info("*******************************FORMATTED CONTENT**************")
async with db_deps.get_db_session_async_context() as db_client:
chat_message = Message(
msg_type=Message.TYPE_ASSISTANT,
content=complete_response,
msg_category=Message.TYPE_QNA
)
chat_message.chat = chat
chat.json_metadata = retrieval_client.base_llm.messages
logger.info(retrieval_client.base_llm.messages)
db_client.add(chat_message)
await db_client.commit()
except ValueError as e:
logger.error(f"An error occurred in get_information_from_db: {str(e)}")
error = {"event": "error", "data": json.dumps({"detail": str(e)})}
except Exception as e:
logger.error(f"An error occurred while retrieving information: {traceback.format_exc()} ->{str(e)}")
error = {"event": "error", "data": json.dumps({"detail": "Internal server error"})}
finally:
if error:
yield error
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
yield {"event": "close"}
def format_response(response: str):
if response is not None:
return response.replace('|', '| ')
else:
return ''
async def check_cloud_account_status(chat_id, api_endpoint, tool,
authorization: HTTPAuthorizationCredentials):
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {authorization.credentials.strip('')}"
}
base_url = f'{settings.web.AUTH_LINK}{api_endpoint}'
try:
async with httpx.AsyncClient() as client:
response = await client.get(
f'{base_url}',
headers=headers,
timeout=10
)
response.raise_for_status()
logger.info(f"Response Status Code: {response.status_code}")
payload = response.json() if response.content else None
if tool == "ClassicMigration":
cloud_message = IBM_CLASSIC_CLOUD_ACCOUNT_MESSAGE
elif tool == "Action":
cloud_message = IBM_CLOUD_ACCOUNT_MESSAGE
elif tool == "ScheduleCall":
cloud_message = GENERAL_CLOUD_ACCOUNT_MESSAGE
elif tool == "InformationRetrievalClassic":
cloud_message = IBM_CLASSIC_CLOUD_ACCOUNT_MESSAGE
elif tool == "InformationRetrievalAction":
cloud_message = IBM_CLOUD_ACCOUNT_MESSAGE
else:
cloud_message = "Please check your cloud accounts to ensure they are properly configured and valid."
return cloud_message
if not payload:
if tool in ["InformationRetrievalClassic", "InformationRetrievalAction"]:
return cloud_message
else:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=cloud_message.format(
vpcplus_url=f"{base_url}",
cloud_whisper_url=f"{settings.web.BACKEND_URI}{api_endpoint}"
),
chat_id=chat_id
))
cloud_accounts = payload.get('items', [])
logger.info(f"Retrieved cloud accounts: {cloud_accounts}")
is_cloud_account_valid = any(account.get('status') == 'VALID' for account in cloud_accounts)
if not is_cloud_account_valid:
logger.info("No valid cloud accounts found.")
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg="The cloud account status is currently invalid. Please check your cloud account and ensure it is properly configured and valid.",
chat_id=chat_id,
))
except httpx.RequestError as e:
logger.error(f"Error fetching cloud accounts: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error fetching cloud accounts: {str(e)}")
return None
|
CloudWhisperCustomBot | app/web/common/db_deps.py | import asyncio
from typing import AsyncGenerator
from contextlib import asynccontextmanager
from loguru import logger
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from app.core.config import settings
AsyncSessionLocal = sessionmaker(
bind=create_async_engine(
settings.db.SQLALCHEMY_DATABASE_URI,
pool_recycle=settings.db.SQLALCHEMY_POOL_RECYCLE,
pool_timeout=settings.db.SQLALCHEMY_POOL_TIMEOUT,
pool_size=settings.db.SQLALCHEMY_POOL_SIZE,
max_overflow=settings.db.SQLALCHEMY_MAX_OVERFLOW,
),
class_=AsyncSession,
expire_on_commit=False,
autocommit=False,
autoflush=False
)
async def get_db_session_async() -> AsyncGenerator[AsyncSession, None]:
    async with AsyncSessionLocal() as session:
        logger.success("Success: connection to the database")
        try:
            yield session
        except Exception:
            await session.rollback()
            raise
        finally:
            logger.info("Closing connection to the database")
@asynccontextmanager
async def get_db_session_async_context() -> AsyncGenerator[AsyncSession, None]:
    async with AsyncSessionLocal() as session:
        logger.success("Success: connection to the database")
        try:
            yield session
        except Exception:
            await session.rollback()
            raise
        finally:
            logger.info("Closing connection to the database")
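# Usage sketch: most call sites in this repo use the context-manager variant, e.g.
# async with get_db_session_async_context() as session:
#     profile = (await session.scalars(select(models.Profile))).first()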
def get_sync_session():
loop = asyncio.get_event_loop()
async_session = AsyncSessionLocal()
return loop.run_until_complete(async_session.__aenter__())
|
CloudWhisperCustomBot | app/web/common/deps.py | import httpx
from fastapi import Depends, Header, HTTPException, WebSocketException
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from httpx import Response
from loguru import logger
from app.api_discovery.utils import update_profile_with_vpcplus_api_key
from app.core.config import settings
from app.web.common import db_deps
from sqlalchemy import select
from app import models
security = HTTPBearer()
async def authenticate_user(
project_id: str = Header(None, convert_underscores=False),
authorization: HTTPAuthorizationCredentials = Depends(security)
) -> Response | dict:
from app.web.profiles.schemas import OnboardingStatus
if authorization.scheme != "Bearer":
raise HTTPException(status_code=401, detail="Invalid authentication scheme")
headers = {'Authorization': f"Bearer {authorization.credentials}"}
if project_id:
headers["project_id"] = project_id
async with httpx.AsyncClient(timeout=30.0) as http_client:
resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/verify", headers=headers)
if not resp or resp.status_code != 200:
raise HTTPException(status_code=401)
if project_id:
data = resp.json()
data["project_id"] = project_id
return data
user_json = resp.json()
async with db_deps.get_db_session_async_context() as session:
result = await session.execute(select(models.Profile).filter_by(user_id=user_json['id']).limit(1))
profile = result.scalar_one_or_none()
logger.info(f"user: {profile}")
if not profile:
profile = models.Profile(
user_id=user_json["id"], name=user_json["name"], project_id=user_json.get("project_id", ""),
is_admin=bool(user_json.get("is_admin") or False), email=user_json["email"], onboarding=OnboardingStatus.app_tour
)
logger.info(profile.to_json())
session.add(profile)
await session.commit()
result = await session.execute(select(models.Profile).filter_by(user_id=user_json['id']).limit(1))
profile = result.scalar_one_or_none()
logger.info(f"{dir(profile)}")
user_json["uuid"] = profile.id
user_json["id"] = profile.user_id
user_json["onboarding"] = profile.onboarding
# If User has already VPC+ API Key created, fetch it from Auth Service and store it in order to run discovery
headers = {"Authorization": f"Bearer {authorization.credentials}"}
    async with httpx.AsyncClient() as http_client:
        resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/api_key", headers=headers)
if resp.status_code == 200:
api_key_json = resp.json()
api_key = api_key_json.get("key")
api_key_name = api_key_json.get("name")
api_key_expiry = api_key_json.get("expires_at")
async with db_deps.get_db_session_async_context() as session:
await update_profile_with_vpcplus_api_key(
profile_id=user_json["id"],
api_key=api_key, api_key_name=api_key_name, api_key_expiry=api_key_expiry if api_key_expiry else None
)
user_json["bearer"] = f"Bearer {authorization.credentials}"
user_json["appearance"] = profile.appearance if profile.appearance else None
return user_json
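# Usage sketch (the route and response below are hypothetical): authenticate_user
# is designed to be used as a FastAPI dependency on protected routes.
# @router.get("/v1/whisper/chats")
# async def list_chats(user: dict = Depends(authenticate_user)):
#     return {"user_id": user["id"]}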
async def first_message_handler(websocket) -> Response | dict:
from app.web.profiles.schemas import OnboardingStatus
token = await websocket.receive_text()
headers = {'Authorization': f"Bearer {token}"}
async with httpx.AsyncClient() as http_client:
resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/verify", headers=headers)
    if not resp or resp.status_code != 200:
raise WebSocketException(code=1008, reason="Policy Violation, User not found") #returns the control and closes the connection
user_json = resp.json()
async with db_deps.get_db_session_async_context() as session:
result = await session.execute(select(models.Profile).filter_by(user_id=user_json['id']).limit(1))
profile = result.scalar_one_or_none()
logger.info(f"user: {profile}")
if not profile:
profile = models.Profile(
user_id=user_json["id"], name=user_json["name"], project_id=user_json.get("project_id", ""),
is_admin=bool(user_json["is_admin"]), email=user_json["email"], onboarding=OnboardingStatus.app_tour
)
logger.info(profile.to_json())
session.add(profile)
await session.commit()
result = await session.execute(select(models.Profile).filter_by(user_id=user_json['id']).limit(1))
profile = result.scalar_one_or_none()
logger.info(f"{dir(profile)}")
user_json["uuid"] = profile.id
user_json["id"] = profile.user_id
user_json["onboarding"] = profile.onboarding
# If User has already VPC+ API Key created, fetch it from Auth Service and store it in order to run discovery
    async with httpx.AsyncClient() as http_client:
        resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/api_key", headers=headers)
if resp.status_code == 200:
api_key_json = resp.json()
api_key = api_key_json.get("key")
api_key_name = api_key_json.get("name")
api_key_expiry = api_key_json.get("expires_at")
async with db_deps.get_db_session_async_context() as session:
await update_profile_with_vpcplus_api_key(
profile_id=user_json["id"],
api_key=api_key, api_key_name=api_key_name, api_key_expiry=api_key_expiry if api_key_expiry else None
)
user_json["bearer"] = f"Bearer {token}"
return user_json
|
CloudWhisperCustomBot | app/web/common/api_path_to_fields.json | {
"Create IBM VPC backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/vpcs": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM Clouds": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM VPC Networks": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/vpcs": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM Regions": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
}
},
"List IBM Draas backups": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/draas_blueprints": {
"GET": {
"fields": [
"id",
"name",
"backups"
],
"nested_fields": {
"backups": [
"id",
"name"
]
}
}
}
},
"Create IBM COS bucket backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"GET": {
"fields": [
"id",
"name",
"cloud_object_storage",
"cos_bucket_versioning",
"regions"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM COS buckets": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"list IBM COS bucket instances": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM Kubernetes Clusters": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Create IBM IKS Cluster backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name",
"master_kube_version"
]
}
},
"v1/ibm/kubernetes_clusters/temp": {
"GET": {
"fields": [
"id",
"workloads"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"GET": {
"fields": [
"id",
"name",
"cloud_object_storage",
"cos_bucket_versioning",
"regions"
]
}
},
"v1/ibm/cloud_object_storages/keys": {
"GET": {
"fields": [
"id",
"name",
"is_hmac"
]
}
}
},
"List IBM COS bucket credential keys": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/cloud_object_storages/keys": {
"GET": {
"fields": [
"id",
"name",
"is_hmac"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List a single IBM Kubernetes Cluster": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name",
"master_kube_version"
]
}
},
"v1/ibm/kubernetes_clusters/temp": {
"GET": {
"fields": [
"id",
"workloads"
]
}
}
},
"List all IBM VSI Instances": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Create IBM VSI backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List all IBM Backup Policies": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List a single IBM VSI": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/instances/temp": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Create scheduled IBM VSI backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Restore IBM IKS Cluster backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/draas_blueprints": {
"GET": {
"fields": [
"id",
"name",
"backups",
"resource_metadata"
],
"nested_fields": {
"backups": [
"id",
"name"
]
}
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name",
"master_kube_version"
]
}
}
},
"Restore IBM IKS Cluster backup in existing IBM VPC": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/draas_blueprints": {
"GET": {
"fields": [
"id",
"name",
"backups"
],
"nested_fields": {
"backups": [
"id",
"name"
],
"resource_metadata": [
"cluster_id",
"blueprint_name"
]
}
}
},
"v1/ibm/vpcs": {
"GET": {
"fields": [
"id",
"name"
],
"nested_fields": {
"associated_resources": [
"subnets"
]
}
}
},
"v1/ibm/resource_groups": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/subnets": {
"GET": {
"fields": [
"id",
"name",
"zone"
]
}
}
}
} |
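# Usage sketch (illustrative, not a file in the repo): one plausible way the mapping
# above could be consumed — trimming upstream VPC+ API responses down to the
# whitelisted fields for a given intent, path, and method. The loader and helper
# below are assumptions, not code from this repository.
#
#     import json
#
#     with open("app/web/common/api_path_to_fields.json") as f:
#         API_PATH_TO_FIELDS = json.load(f)
#
#     def trim_items(intent: str, path: str, method: str, items: list[dict]) -> list[dict]:
#         spec = API_PATH_TO_FIELDS[intent][path][method]
#         fields, nested = spec["fields"], spec.get("nested_fields", {})
#         trimmed = []
#         for item in items:
#             slim = {k: item[k] for k in fields if k in item}
#             for key, subfields in nested.items():
#                 # nested_fields entries (e.g. "backups") are lists of dicts; trim each element
#                 if isinstance(slim.get(key), list):
#                     slim[key] = [{k: v[k] for k in subfields if k in v} for v in slim[key]]
#             trimmed.append(slim)
#         return trimmed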
CloudWhisperCustomBot | app/web/common/cloud_setup_instruction_messages.py | API_KEY_MESSAGE = """Cloud Whisper requires a VPC+ API key to discover data and perform actions. Please follow these steps to create your API key:\n
1. Create your VPC+ API Key: \n \t \n
a. Click on User name in the bottom left corner and select Settings \n \t \n
b. Navigate to the "API Key" section \n \t \n
c. Create a new API key:\n \t \n
- Provide a name for your key\n \t \n
- Add a description\n \t \n
- Set an expiration time (optional)\n\n
Once completed, Cloud Whisper will be able to access the necessary VPC+ data for its operations.\n\n
If you encounter any issues during this process, please don't hesitate to contact our support team.
"""
IBM_CLASSIC_CLOUD_ACCOUNT_MESSAGE = """Cloud Whisper requires a connected IBM Classic Cloud account to discover data and perform actions. Please follow these steps to add your IBM Classic Cloud account:\n
1. Add your IBM Classic Cloud Account: \n \t \n
a. Select IBM Classic Cloud Accounts in the bottom left corner of the interface.\n \t \n
b. Click on Add Account.\n \t \n
c. Fill in the Name, Username and API key.\n \t \n
d. Click Add to create and save your IBM Classic Cloud account.\n\n
Once completed, Cloud Whisper will be able to access the necessary data for its operations.\n\n
If you encounter any issues during this process, please contact our support team for assistance.
"""
IBM_CLOUD_ACCOUNT_MESSAGE = """Cloud Whisper requires a connected IBM Cloud account to discover data and perform actions. Please follow these steps to add your IBM Cloud account:\n
1. Add your IBM Cloud Account:\n \t \n
a. Select IBM Cloud Accounts in the bottom left corner of the interface.\n \t \n
b. Click on Add Account.\n \t \n
c. Fill in the Name and API key.\n \t \n
d. Click Add to create and save your cloud account.\n\n
Once completed, Cloud Whisper will be able to access the necessary data for its operations.\n\n
If you encounter any issues during this process, please contact our support team for assistance.
"""
GENERAL_CLOUD_ACCOUNT_MESSAGE = """Cloud Whisper requires a connected IBM Cloud and IBM Classic Cloud account to discover data and perform actions. Please follow these steps to add your account:\n
1. Add your IBM Cloud and IBM Classic Cloud Account: \n \t \n
a. Select IBM Cloud Accounts and IBM Classic Cloud Accounts from the bottom left corner of the interface.\n \t \n
b. Click on Add Account.\n \t \n
c. For IBM Cloud accounts, fill in the Name and API key. For IBM Classic Cloud accounts, fill in the Name, Username, and API key.\n \t \n
d. Click Add to create and save your account.\n\n
Once completed, Cloud Whisper will be able to access the necessary data for its operations.\n\n
If you encounter any issues during this process, please contact our support team for assistance.
"""
|
CloudWhisperCustomBot | app/web/activity_tracking/neo4j_query.py | get_vpc_backups_query = """
MATCH (v:VPC)
OPTIONAL MATCH (b:VPCBackup {name: v.name})
WITH v, b
WHERE b IS NULL AND v.cloud_id = '{cloud_id}'
RETURN v.cloud_id, v.name
"""
get_iks_backups_query = """
MATCH (v:KubernetesCluster)
OPTIONAL MATCH (b:IKSBackupDetails {name: v.name})
WITH v, b
WHERE b IS NULL AND v.cloud_id = '{cloud_id}'
RETURN v.cloud_id, v.name
"""
get_cos_backups_query = """
MATCH (v:COSBucket)
OPTIONAL MATCH (b:COSBucketBackupDetails {name: v.name})
WITH v, b
WHERE b IS NULL AND v.cloud_id = '{cloud_id}'
RETURN v.cloud_id, v.name
"""
get_vsi_backups_query = """
MATCH (v:VirtualServerInstance)
OPTIONAL MATCH (b:VirtualServerInstanceBackup {name: v.name})
WITH v, b
WHERE b IS NULL AND v.cloud_id = '{cloud_id}'
RETURN v.cloud_id, v.name
"""
|
CloudWhisperCustomBot | app/web/activity_tracking/__init__.py | from .api import activity_tracking_n_recommendations
__all__ = ["activity_tracking_n_recommendations"]
|
CloudWhisperCustomBot | app/web/activity_tracking/api.py | from math import ceil
from typing import Annotated, Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from loguru import logger
from pydantic import conint
from sqlalchemy import desc, func, select
from sqlalchemy.orm import undefer
from app import models
from app.core.config import settings
from app.web.activity_tracking.neo4j_query import get_vpc_backups_query, get_iks_backups_query, get_cos_backups_query, \
get_vsi_backups_query
from app.web.common import deps, db_deps
from app.whisper.utils.neo4j.client import Neo4j
activity_tracking_n_recommendations = APIRouter()
@activity_tracking_n_recommendations.get("", name="Get Activities (Backup and Restore) Performed & Recommendations")
async def get_activity_n_recommendations(
cloud_id: str,
user=Depends(deps.authenticate_user),
        recommendation: Optional[bool] = None,
status: Annotated[list[str] | None, Query()] = None,
cloud: Annotated[list[str] | None, Query()] = None,
start: conint(ge=1) = 1,
limit: conint(ge=1, le=settings.pagination_config.MAX_PAGE_LIMIT) = settings.pagination_config.DEFAULT_LIMIT
):
from app.main import app
response = []
# Only add recommendations if it's the first page
if start == 1 and recommendation:
neo4j_client = Neo4j(db_session=app.state.neo4j_session, user_id=user["id"])
# Add VPC backup recommendation
vpc_query = get_vpc_backups_query.replace("{cloud_id}", cloud_id)
vpc_result = neo4j_client.query_database(vpc_query)
if vpc_result:
vpc_backup_recommendation_dict = {
'type': 'recommendation',
'cloud': 'ibm',
'status': 'info',
'title': 'You have multiple VPCs which are not backed up, would you like to back them up?',
'prompt': 'How many VPCs do I have which are not backed up? I need to back them up',
}
response.append(vpc_backup_recommendation_dict)
# Add IKS backup recommendation
iks_query = get_iks_backups_query.replace("{cloud_id}", cloud_id)
iks_result = neo4j_client.query_database(iks_query)
if iks_result:
iks_backup_recommendation = {
'type': 'recommendation',
'cloud': 'ibm',
'status': 'info',
'title': 'You have multiple IKS clusters which are not backed up, would you like to back them up?',
'prompt': 'How many IKS clusters do I have which are not backed up? I need to back them up',
}
response.append(iks_backup_recommendation)
# Add COS Buckets backup recommendation
cos_buckets_query = get_cos_backups_query.replace("{cloud_id}", cloud_id)
cos_result = neo4j_client.query_database(cos_buckets_query)
if cos_result:
cos_buckets_backup_recommendation = {
'type': 'recommendation',
'cloud': 'ibm',
'status': 'info',
'title': 'You have multiple COS Buckets which are not backed up, would you like to back them up?',
'prompt': 'How many COS Buckets do I have which are not backed up? Can you show them?',
}
response.append(cos_buckets_backup_recommendation)
# Add VSI backup recommendation
vsi_query = get_vsi_backups_query.replace("{cloud_id}", cloud_id)
vsi_result = neo4j_client.query_database(vsi_query)
if vsi_result:
vsi_backup_recommendation = {
'type': 'recommendation',
'cloud': 'ibm',
'status': 'info',
'title': 'You have multiple Virtual Server Instances (VSIs) which are not backed up, would you like to back them up?',
'prompt': 'How many Virtual Server Instances (VSIs) do I have which are not backed up? I need to back them up',
}
response.append(vsi_backup_recommendation)
async with db_deps.get_db_session_async_context() as db_session:
        # Pagination logic for activities; apply the same filters to the count and the page query
        filters = {"user_id": user["id"]}
        # TODO: add a cloud filter as well once other clouds (Softlayer, AWS, etc.) come in
        base_query = select(models.ActivityTracking).filter_by(**filters)
        count_query = select(func.count(models.ActivityTracking.id)).filter_by(**filters)
        if status:
            base_query = base_query.filter(models.ActivityTracking.status.in_(status))
            count_query = count_query.filter(models.ActivityTracking.status.in_(status))
        total = await db_session.scalar(count_query)
        pages = ceil(total / limit)
        if start > pages:
            start = 1
        offset = (start - 1) * limit
        activities = (await db_session.scalars(
            base_query.options(undefer(models.ActivityTracking.fe_request_data))
            .order_by(desc(models.ActivityTracking.created_at)).offset(offset).limit(limit)
        )).all()
if activities:
for activity in activities:
action = (
await db_session.scalars(
select(models.Action).filter(models.Action.id == activity.action_id))).one_or_none()
activity_dict = {
'type': 'action',
'cloud': 'ibm',
'status': activity.status,
'title': f"{action.name} of {activity.resource_name}",
'json': await activity.to_json(db_session)
}
response.append(activity_dict)
return {
"items": response,
"previous": start - 1 if start > 1 else None,
"next": start + 1 if start < pages else None,
"pages": pages,
"total": total
}
@activity_tracking_n_recommendations.get("/{workflow_id}", name="Get a Workflow by ID")
async def get_activity(
workflow_id: str,
user=Depends(deps.authenticate_user),
):
async with db_deps.get_db_session_async_context() as db_session:
activity = (await db_session.scalars(select(models.ActivityTracking).filter(
models.ActivityTracking.workflow_id == workflow_id).options(undefer(models.ActivityTracking.fe_request_data)))
).one_or_none()
if not activity:
logger.error(f"No activity found with ID: {workflow_id}")
raise HTTPException(status_code=404, detail=f"No activity found with ID: {workflow_id}")
return await activity.to_json(db_session)
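# Client sketch (not part of the original file): fetching the first page of activities
# plus recommendations. The router's mount prefix is defined elsewhere in the app, so
# the URL below is an assumption.
#
#     import httpx
#
#     resp = httpx.get(
#         "https://<host>/v1/whisper/activity-tracking",  # hypothetical prefix
#         params={"cloud_id": cloud_id, "recommendation": True, "start": 1, "limit": 10},
#         headers={"Authorization": f"Bearer {token}"},
#     )
#     page = resp.json()  # {"items": [...], "previous": None, "next": ..., "pages": ..., "total": ...}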
|
CloudWhisperCustomBot | app/web/chats/schemas.py | import datetime
import typing as t
import uuid
from enum import Enum
from pydantic import BaseModel, Field
class MessageTypeEnum(str, Enum):
Human = 'Human'
Assistant = 'Assistant'
class ChatTypeEnum(str, Enum):
QnA = 'QnA'
Action = 'Action'
class MessageType(BaseModel):
type: MessageTypeEnum
class MessageRequest(BaseModel):
text: str
type: t.Optional[str] = "Human"
class MessageResponse(BaseModel):
id: uuid.UUID
sent_at: datetime.datetime
text: str
type: MessageType
class ChatRequest(BaseModel):
question: str
chat_id: t.Optional[str] = Field(default=None, max_length=32, min_length=32, regex="^[0-9a-fA-F]+$")
action_id: t.Optional[str] = Field(default=None, max_length=32, min_length=32, regex="^[0-9a-fA-F]+$")
regenerate: t.Optional[bool] = Field(default=False)
cloud_account_id: t.Optional[str] = Field(default=None)
cloud_type: t.Optional[str] = Field(default=None)
class GenerateTitleRequest(BaseModel):
message_id: uuid.UUID
class UpdateChatRequest(BaseModel):
is_visible: t.Optional[bool]
title: t.Optional[str]
metadata: t.Optional[dict]
class StreamConversationRequest(BaseModel):
message_id: uuid.UUID
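# Usage sketch (not part of the original file): constructing a ChatRequest. chat_id and
# action_id must be 32-character hex strings (uuid4().hex satisfies the regex and the
# min/max length constraints above); the cloud values shown are assumptions.
#
#     from uuid import uuid4
#
#     req = ChatRequest(
#         question="List my IBM VPC networks",
#         chat_id=uuid4().hex,   # 32 hex chars
#         cloud_type="ibm",      # hypothetical value
#     )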
|
CloudWhisperCustomBot | app/web/chats/__init__.py | from .api import whisper_chats
__all__ = ["whisper_chats"]
|