repo_id | file_path | content
---|---|---|
CloudWhisperCustomBot | Makefile | # List of targets and their descriptions
.PHONY: help
help:
@echo "Note: The following commands will change the status of 'cloud_whisper' database.\n"
@echo "Available targets:"
@echo " migrate Create a new Migration File."
@echo " upgrade Upgrade to a later version."
@echo " downgrade Revert to a previous version."
@echo " head View current revision/version."
migrate:
@echo "Populate revision script with candidate migration operations, based on comparison of database to model."
@docker exec cloud_whisper_web bash -c "alembic revision --autogenerate"
upgrade:
@echo "Upgrade to a later version."
@docker exec cloud_whisper_web bash -c "alembic upgrade head"
downgrade:
@echo "Revert to a previous version."
@docker exec cloud_whisper_web bash -c "alembic downgrade -1"
head:
@echo "View current revision/version."
@docker exec cloud_whisper_web bash -c "alembic current"
|
CloudWhisperCustomBot | Dockerfile | # Dockerfile
FROM python:3.11-slim-bullseye
# Set the working directory
WORKDIR /CloudWhisperCustomBot
COPY requirements.txt .
RUN apt-get update && apt-get install -y \
    build-essential \
    dumb-init \
    curl \
    lsb-release \
    wget \
    cmake \
    libpq-dev gcc python3-dev
RUN pip3 install --upgrade pip && pip3 install -r requirements.txt
# Copy the app source code
COPY . /CloudWhisperCustomBot
RUN chmod 755 /CloudWhisperCustomBot/scripts/dev.sh
ENV PYTHONPATH=/CloudWhisperCustomBot
ENTRYPOINT ["dumb-init", "--"]
CMD ["/bin/bash", "-c", "/CloudWhisperCustomBot/scripts/dev.sh"]
|
CloudWhisperCustomBot | docker-compose.yml | services:
cloud_whisper_fe:
container_name: cloud_whisper_fe
build: ../cloud-whisper-frontend
command: npm run build
environment:
REACT_APP_API_URL: https://cloudwhisper-stage.wanclouds.ai/
REACT_APP_AUTH_REDIRECT_URI: https://cloudwhisper-stage.wanclouds.ai/users/wc/callback
REACT_APP_WEBSOCKETS_STATUS: enabled
REACT_APP_WEBSOCKETS_URL: wss://cloudwhisper-stage.wanclouds.ai/v1/whisper/websockets/whisper-inference
REACT_APP_GOOGLE_AUTH_SSO_REDIRECTION_URI: https://cloudwhisper-stage.wanclouds.ai/
REACT_APP_GOOGLE_AUTH_SSO_STATUS: enabled
REACT_APP_WANCLOUDS_AUTH_SSO_STATUS: disabled
REACT_APP_PAYMENT_CALLBACK_URI_PATH: https://cloudwhisper-stage.wanclouds.ai
REACT_APP_DRAAS_BOT_STATUS: 'enabled'
REACT_APP_DRAAS_BOT_URI: https://cloudwhisper-stage.wanclouds.ai/ # This is the main page; comment out if not needed
REACT_APP_AUTH_URL: https://accounts-stage.wanclouds.net/
ports:
- "3000:3000"
networks:
- cloud_whisper_custom_bot
cloud_whisper_web:
container_name: cloud_whisper_web
env_file:
- "./.env.web"
- "./.env.aws_configurations"
- "./.env.anthropic_apikey"
- "./.env.postgres"
- "./.env.groq_apikey"
- "./.env.base_bot_secrets"
- "./.env.neo4j"
restart: always
build:
context: .
dockerfile: Dockerfile
image: cloud_whisper_custom_web
environment:
BASE_BOT_URL: "https://wanclouds.ai/v1/whisper/bots/{BASE_BOT_ID}/qna_chats"
AUTH_LINK: https://vpc-stage.wanclouds.net
BACKEND_URI: https://cloudwhisper-stage.wanclouds.ai
ports:
- "8008:8008"
expose:
- "8008"
depends_on:
- postgresdb
volumes:
- ./app:/CloudWhisperCustomBot/app
- ./migrations:/CloudWhisperCustomBot/migrations
- ./cache:/CloudWhisperCustomBot/cache
- ./cache/huggingface:/root/.cache/huggingface
networks:
- cloud_whisper_custom_bot
nginx:
image: wancloudsinc/doosra-vpc-nginx:latest
container_name: nginx
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf
networks:
- cloud_whisper_custom_bot
qdrant:
image: qdrant/qdrant:v1.11.3
container_name: qdrant
ports:
- "6333:6333"
- "6334:6334"
volumes:
- ./qdrant_data:/qdrant/storage
networks:
- cloud_whisper_custom_bot
neo4j:
image: neo4j:5.19.0
container_name: neo4j
ports:
- "7474:7474"
- "7687:7687"
volumes:
- ./app:/CloudWhisperCustomBot/app/neo4jdata
environment:
- NEO4J_AUTH=neo4j/72054321
- NEO4J_PLUGINS=["apoc"]
- NEO4J_apoc_export_file_enabled=true
- NEO4J_apoc_import_file_enabled=true
- NEO4J_apoc_import_file_use__neo4j__config=true
networks:
- cloud_whisper_custom_bot
discovery_worker:
env_file:
- "./.env.postgres"
build:
context: .
dockerfile: ./Dockerfile
image: cloud_whisper_custom_web
entrypoint: ./scripts/discovery_worker.sh
container_name: discovery_worker
links:
- redis
depends_on:
- redis
- cloud_whisper_web
- postgresdb
environment:
- NEO4J_URI=bolt://neo4j:7687
volumes:
- .:/CloudWhisperCustomBot
restart: always
networks:
- cloud_whisper_custom_bot
cloud_whisper_worker:
build:
context: .
dockerfile: ./Dockerfile
image: cloud_whisper_custom_web
entrypoint: ./scripts/worker.sh
container_name: cloud_whisper_worker
links:
- redis
- postgresdb
depends_on:
- redis
volumes:
- .:/CloudWhisperCustomBot
restart: always
networks:
- cloud_whisper_custom_bot
beat:
build:
context: .
dockerfile: ./Dockerfile
image: cloud_whisper_custom_web
entrypoint: ./scripts/beat.sh
container_name: beat
links:
- redis
depends_on:
- redis
- cloud_whisper_web
volumes:
- .:/app/redis_data
restart: always
networks:
- cloud_whisper_custom_bot
redis:
image: redis:latest
container_name: redis
networks:
- cloud_whisper_custom_bot
postgresdb:
image: postgres:16
env_file:
- "./.env.postgres"
container_name: postgresdb
environment:
POSTGRES_USER: admin
POSTGRES_PASSWORD: admin123
POSTGRES_DB: cloud_whisper
PGDATA: /data/postgres
ports:
- "5432:5432"
volumes:
- dbdata:/data/postgres
networks:
- cloud_whisper_custom_bot
pgadmin:
image: dpage/pgadmin4
container_name: pgadmin4
restart: always
ports:
- "8888:80"
environment:
PGADMIN_DEFAULT_EMAIL: admin@wanclouds.net
PGADMIN_DEFAULT_PASSWORD: admin123
PGADMIN_CONFIG_SERVER_MODE: 'False'
volumes:
- pgadmin-data:/var/lib/pgadmin
networks:
- cloud_whisper_custom_bot
volumes:
dbdata:
driver: local
pgadmin-data:
driver: local
redis_data:
driver: local
neo4jdata:
driver: local
networks:
cloud_whisper_custom_bot:
|
CloudWhisperCustomBot | alembic.ini | # A generic, single database configuration.
[alembic]
# path to migration scripts.
# Use forward slashes (/) also on windows to provide an os agnostic path
script_location = migrations
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python>=3.9 or backports.zoneinfo library.
# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
# string value is passed to ZoneInfo()
# leave blank for localtime
# timezone =
# max length of characters to apply to the "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to migrations/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:migrations/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = driver://user:pass@localhost/dbname
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
# hooks = ruff
# ruff.type = exec
# ruff.executable = %(here)s/.venv/bin/ruff
# ruff.options = --fix REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
|
CloudWhisperCustomBot | pyproject.toml | [tool.ruff]
line-length = 120
target-version = "py311"
[tool.ruff.format]
indent-style = "tab"
quote-style = "double"
[tool.poetry]
name = "CloudWhisperCustomBot"
version = "0.1.0"
description = "Chat with your API in Natural Language"
authors = ["syedfurqan <syedfurqan@wanclouds.net>"]
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.poetry.dependencies]
python = "^3.9"
aiohttp = "^3.9.1"
urllib3 = "^2.1.0"
transformers = "^4.36.2"
langchain-community = "^0.0.12"
loguru = "^0.7.2"
fastapi = "^0.109.0"
uvicorn = "^0.25.0"
edgedb = "^1.8.0"
python-dotenv = "^1.0.0"
openapi-pydantic = "^0.4.0"
torch = "^2.1.2"
peft = "^0.7.1"
langchain = "^0.1.0"
[tool.poetry.group.dev.dependencies]
pytest = "^7.4.4"
ruff = "^0.1.13"
|
CloudWhisperCustomBot | requirements.txt | aiohttp==3.9.0
alembic==1.9.0
alembic_postgresql_enum==1.3.0
anthropic==0.34.2
asyncpg==0.27.0
bcrypt==4.1.3
celery==5.3.1
celery-singleton==0.3.1
faiss-cpu==1.7.4
fastapi==0.104.1
httpx==0.27.0
langchain==0.0.351
langchain-community==0.0.3
langchain-core==0.1.1
loguru==0.7.2
llama-index==0.10.58
llama-index-vector-stores-qdrant==0.2.8
mailchimp-transactional==1.0.46
neo4j==5.21.0
openai==1.37.0
openapi-schema-pydantic==1.2.4
boto3==1.35.34
openapi_pydantic==0.3.2
pydantic==1.10.13
pydantic_core==2.4.0
python-dotenv==1.0.0
python-multipart==0.0.9
pycryptodome==3.20.0
qdrant-client==1.8.0
redis==4.5.4
scipy==1.11.1
sentence-transformers==2.3.1
sse-starlette==1.6.5
sqlalchemy-utils==0.41.2
sqlalchemy[asyncio]==2.0.31
uvicorn==0.24.0.post1
uvicorn[standard]==0.24.0.post1
groq==0.11.0
|
CloudWhisperCustomBot | README.md | # CloudWhisperCustomBot |
CloudWhisperCustomBot | app/main.py | from contextlib import asynccontextmanager
from fastapi import APIRouter, FastAPI
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from qdrant_client import models
from qdrant_client.http.exceptions import UnexpectedResponse
from app.core.config import settings, setup_app_logging, neo4j_driver
from app.web import api_router
from app.worker.cypher_store import create_qdrant_client
# Define root API router
root_router = APIRouter()
async def get_neo4j_session(driver):
session = driver.session()
return session
async def check_qdrant_collection():
qd_client = create_qdrant_client(location=settings.qdrant.QDRANT_LOCATION,
api_key=settings.qdrant.QDRANT_API_KEY,
url=settings.qdrant.QDRANT_URL)
collection_name = 'cypher_queries'
try:
qd_client.get_collection(collection_name)
logger.info(f"Collection '{collection_name}' exists.")
except UnexpectedResponse as e:
logger.info(e)
qd_client.create_collection(
collection_name=collection_name,
vectors_config=models.VectorParams(size=1536, distance=models.Distance.COSINE),
)
logger.info(f"Qdrant collection '{collection_name}' created successfully.")
# Validate configuration on startup
try:
settings.base_bot.validate()
except ValueError as e:
logger.error(f"Configuration error: {e}")
exit(1)
# Asynchronous context manager for application startup
@asynccontextmanager
async def startup(app: FastAPI):
setup_app_logging(config=settings)
app.state.vector_store_index = settings.vector_store.create_vector_store_index()
app.state.neo4j_session = await get_neo4j_session(driver=neo4j_driver)
app.state.qdrant_collection = await check_qdrant_collection()
yield
# Create FastAPI application instance
app = FastAPI(
lifespan=startup,
title="Cloud Whisper API",
openapi_url=f"{settings.URL_PREFIX}/openapi.json",
docs_url=settings.DOCS_URL
)
# Add CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Include routers
app.include_router(api_router, prefix=settings.URL_PREFIX)
app.include_router(root_router)
|
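For orientation, here is a minimal sketch (not part of the repository) of how a request handler could read the resources the `startup` lifespan stores on `app.state`; the route path and response payload are assumptions.

```python
# Illustrative sketch only (not repository code): a route that inspects the resources
# the startup lifespan attaches to app.state. Route path and payload are assumptions.
from fastapi import Request

from app.main import app


@app.get("/debug/state")
async def debug_state(request: Request):
    state = request.app.state
    return {
        "has_vector_store_index": state.vector_store_index is not None,
        "has_neo4j_session": state.neo4j_session is not None,
    }
```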
CloudWhisperCustomBot | app/__init__.py | from app.api_discovery.discovery import discover_api_data
from app.worker.cypher_store import qdrant
from app.worker.scheduled_tasks import track_and_update_activity_status
__all__ = ["discover_api_data", "qdrant", "track_and_update_activity_status"]
|
CloudWhisperCustomBot | app/redis_scheduler.py | from datetime import timedelta
from celery import Celery
from celery.signals import worker_ready
from celery_singleton import clear_locks
from app.core.config import settings
broker = settings.redis.REDIS_URL
celery_app = Celery(
'whisper-celery',
broker=broker,
include=[
'app.api_discovery',
'app.worker.cypher_store',
'app.worker'
],
broker_connection_retry_on_startup=True
)
celery_app.conf.beat_schedule = {
'run_discovery': {
'task': 'discover_api_data',
'schedule': timedelta(minutes=1),
'options': {'queue': 'redis_queue'}
},
'track_and_update_activity_task': {
'task': 'track_and_update_activity_status',
'schedule': timedelta(seconds=10),
'options': {'queue': 'redis_queue'}
},
}
@worker_ready.connect
def unlock_all(**kwargs):
clear_locks(celery_app)
|
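As a usage note, the beat entries above dispatch tasks by registered name onto the `redis_queue` queue. The following is a minimal sketch, assuming a Celery worker is consuming that queue, of triggering one of the scheduled tasks manually outside of beat.

```python
# Illustrative sketch only: enqueue a scheduled task by name, bypassing beat.
# Assumes a worker is consuming 'redis_queue'; no result backend is configured,
# so only the task id is inspected here.
from app.redis_scheduler import celery_app

async_result = celery_app.send_task("track_and_update_activity_status", queue="redis_queue")
print(async_result.id)
```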
CloudWhisperCustomBot | app/worker/scheduled_tasks.py | import asyncio
import httpx
import mailchimp_transactional as MailchimpTransactional
from celery_singleton import Singleton
from loguru import logger
from mailchimp_transactional.api_client import ApiClientError
from sqlalchemy import select
from app.models import Profile, ActivityTracking
from app.redis_scheduler import celery_app
from app.redis_scheduler import celery_app as celery
from app.web.common.db_deps import AsyncSessionLocal, get_db_session_async_context
from ..api_discovery.utils import decrypt_api_key
from ..core.config import settings
from ..web.common.utils import update_activity_status
def run_async(coro):
loop = asyncio.get_event_loop()
return loop.run_until_complete(coro)
@celery_app.task(name="track_and_update_activity_status", base=Singleton, queue='redis_queue')
def track_and_update_activity_status():
logger.info("<==============================================================================================>")
logger.info("<==================================== INITIATING ACTIVITY TRACKING ====================================>")
logger.info("<==============================================================================================>")
async def async_operation(query):
async with AsyncSessionLocal() as session:
result = await session.execute(query)
activities = result.scalars().all()
activities = [{'status': activity.status, 'email': activity.email, 'resource_name': activity.resource_name,
'activity_type': activity.activity_type, "id": activity.id, "resource_type": activity.resource_type,
"workflow_id": activity.workflow_id, "action_id": activity.action_id} for activity in activities]
return activities
async def profile_async_operation(query):
async with AsyncSessionLocal() as session:
result = await session.execute(query)
users = result.scalars().all()
users = [{'user_id': user.user_id, 'api_key': user.api_key, 'email': user.email, 'name': user.name,
'api_key_status': user.api_key_status or ''} for user in users]
return users
async def run_task():
query = select(ActivityTracking)
user_query = select(Profile)
activities = await async_operation(query)
users = await profile_async_operation(user_query)
if activities and users:
for activity in activities:
for user in users:
if not user['api_key']:
continue
if user.get('api_key_status') == settings.api_key_status.STATUS_INVALID:
continue
logger.info(f"apikey: {user['api_key']}, user_id: {user['user_id']}")
decrypted_api_key = decrypt_api_key(user['api_key']['API-KEY'])
headers = {"API-KEY": decrypted_api_key}
if activity["status"] in ActivityTracking.poling_statuses_list:
async with httpx.AsyncClient() as http_client:
if activity["activity_type"] == ActivityTracking.RESTORE and activity["resource_type"] in ActivityTracking.resource_types_list:
resp = await http_client.get(
f"{settings.web.AUTH_LINK}/v1/ibm/workspaces/{activity['workflow_id']}",
headers=headers)
else:
resp = await http_client.get(
f"{settings.web.AUTH_LINK}/v1/ibm/workflows/{activity['workflow_id']}",
headers=headers)
if resp and resp.status_code == 200:
async with get_db_session_async_context() as db_session:
await update_activity_status(activity_response=resp.json(),
activity_id=activity["id"])
updated_activity = (await db_session.scalars(select(ActivityTracking).filter(
ActivityTracking.workflow_id == activity['workflow_id']))).one_or_none()
recipients = [{"email": activity["email"], "type": "to"}]
if updated_activity.status == ActivityTracking.STATUS_C_SUCCESSFULLY:
send_activity_email.delay(email_to=recipients, user_name=user['name'],
resource_type=activity["resource_type"],
resource_name=activity["resource_name"],
activity_type=activity["activity_type"],
success=True, whisper_url=settings.web.BACKEND_URI)
if updated_activity.status in ActivityTracking.failure_statues:
send_activity_email.delay(email_to=recipients, user_name=user['name'],
resource_type=activity["resource_type"],
resource_name=activity["resource_name"],
activity_type=activity["activity_type"],
success=False, whisper_url=settings.web.BACKEND_URI)
else:
logger.info("NO ACTIVITY FOUND IN DATABASE")
asyncio.run(run_task())
@celery.task(name="send_activity_email", base=Singleton, queue='redis_queue')
def send_activity_email(email_to: list, user_name: str = "", resource_type: str = "", resource_name: str = "",
whisper_url: str = "", activity_type: str = "", success: bool = None) -> None:
"""
This function initializes the Mailchimp client and sends an email.
"""
# Handle special cases for activity type
if activity_type.lower() == "backup":
action_verb = "backed up"
elif activity_type.lower() == "restore":
action_verb = "restored"
else:
action_verb = f"{activity_type.lower()}ed" # default for other actions
if success:
subject = f"{activity_type.capitalize()} completed: {resource_type}"
text = (f"Hey {user_name},\n\nYour {resource_type} ({resource_name}) has successfully been {action_verb}. "
f"Please visit the following link for further details:\n{whisper_url}\n\nThanks,\nWanclouds Inc.")
else:
subject = f"{activity_type.capitalize()} failed: {resource_type}"
text = (
f"Hey {user_name},\n\nUnfortunately, there was an issue with your {activity_type} attempt for"
f" {resource_name}."
f"Please check the details and retry or contact support for further assistance.\n"
f"Visit {whisper_url} for more information.\n\nThanks,\nWanclouds Inc.")
mailchimp = MailchimpTransactional.Client(settings.email.MANDRILL_API_KEY)
message = {
"from_email": settings.email.MAIL_USERNAME,
"subject": subject,
"text": text,
"to": email_to
}
try:
response = mailchimp.messages.send({"message": message})
logger.info('Email sent successfully: {}'.format(response))
except ApiClientError as error:
logger.error('An exception occurred: {}'.format(error.text))
|
CloudWhisperCustomBot | app/worker/cypher_store.py | import time
from llama_index.core import VectorStoreIndex, ServiceContext
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.schema import TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.qdrant import QdrantVectorStore
from loguru import logger
from qdrant_client import QdrantClient
from app.core.config import settings
from app.redis_scheduler import celery_app
def get_qdrant_pipeline():
qd_client = create_qdrant_client(location=settings.qdrant.QDRANT_LOCATION,
api_key=settings.qdrant.QDRANT_API_KEY,
url=settings.qdrant.QDRANT_URL)
collection_name = "cypher_queries"
vector_store = QdrantVectorStore(
client=qd_client,
collection_name=collection_name
)
pipeline = IngestionPipeline(
transformations=[
OpenAIEmbedding(api_key=settings.openai.OPENAI_API_KEY,
embed_batch_size=10, model="text-embedding-3-small")
],
vector_store=vector_store
)
return pipeline
def create_qdrant_client(location=None, url=None, api_key=None, timeout=None):
# Use the provided arguments or fall back to the default configuration
location = location or settings.qdrant.QDRANT_LOCATION
url = url or settings.qdrant.QDRANT_URL
api_key = api_key or settings.qdrant.QDRANT_API_KEY
timeout = timeout or settings.qdrant.QDRANT_TIME_OUT
# Directly instantiate the QdrantClient with the appropriate parameters
return QdrantClient(url=url, api_key=api_key, timeout=timeout) if url and api_key else QdrantClient(location,
timeout=timeout)
def qdrant_retrieval(query, k):
q_client = create_qdrant_client(location=settings.qdrant.QDRANT_LOCATION,
api_key=settings.qdrant.QDRANT_API_KEY,
url=settings.qdrant.QDRANT_URL)
vector_store = QdrantVectorStore(
client=q_client,
collection_name='cypher_queries'
)
llm = OpenAI(api_key=settings.openai.OPENAI_API_KEY)
service_context = ServiceContext.from_defaults(llm=llm,
embed_model=OpenAIEmbedding(
api_key=settings.openai.OPENAI_API_KEY,
embed_batch_size=10, model="text-embedding-3-small"))
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
retriever = index.as_retriever(similarity_top_k=k, **{"vector_store_query_mode": "text_search"})
docs_with_scores = retriever.retrieve(query)
return docs_with_scores
@celery_app.task(name="qdrant")
def qdrant(question, cypher):
nodes = [TextNode(text=question, metadata={"cypher": str(cypher.strip())}, text_template='{content}')]
logger.info('<<<<<<<<<<<<<<<cypher and query>>>>>>>>>>>>>>>>>>>>')
logger.info(nodes)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
start_time = time.time()
docs_with_scores = qdrant_retrieval(question, 1)
end_time = time.time()
execution_time = end_time - start_time
logger.info('<<<<<<<<<<<<<<getting time>>>>>>>>>>>>>>>>>>>>')
logger.info(execution_time)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
logger.info('<<<<<<<<<<<<<<<docs_with_scores>>>>>>>>>>>>>>>>>>>>')
logger.info(docs_with_scores)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
if not docs_with_scores:
logger.info('No similar documents found.')
pipeline = get_qdrant_pipeline()
pipeline.run(nodes=nodes)
else:
score = docs_with_scores[0].score
logger.info('<<<<<<<<<<<<<<<SCORE>>>>>>>>>>>>>>>>>>>>')
logger.info(score)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
# Check if the similarity score is below the threshold
if score < 0.9:
logger.info("Similarity score below threshold. Adding new document to the vector store.")
start_time1 = time.time()
pipeline = get_qdrant_pipeline()
pipeline.run(nodes=nodes)
end_time1 = time.time()
execution_time1 = end_time1 - start_time1
logger.info('<<<<<<<<<<<<<<<Time taken to store the query>>>>>>>>>>>>>>>>>>>>')
logger.info(execution_time1)
logger.info('<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>')
else:
logger.info("Similar cypher and query already exists in the vector database. Skipping save.")
|
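For orientation, a hedged sketch of how the deduplicating `qdrant` task above might be enqueued from application code; the question and Cypher text are placeholders, not repository data.

```python
# Illustrative sketch only: enqueue the qdrant task defined above with placeholder values.
from app.worker.cypher_store import qdrant

qdrant.delay(
    "How many VPCs do I have?",
    "MATCH (v:VPC) RETURN count(v) AS vpc_count",
)
```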
CloudWhisperCustomBot | app/web/__init__.py | from fastapi import APIRouter
from app.web.chats import whisper_chats
from app.web.clouds import whisper_clouds
# from app.web.knowledge_graphs import whisper_knowledge_graphs
from app.web.profiles import whisper_profiles
from app.web.profiles import router
from app.web.websockets import websockets_chats
from app.web.activity_tracking import activity_tracking_n_recommendations
api_router = APIRouter()
api_router.include_router(whisper_chats, prefix="/chats", tags=["Chat"])
api_router.include_router(whisper_profiles, prefix="/profiles", tags=["Profile"])
api_router.include_router(router, prefix="/user/profile", tags=["Profile"])
# api_router.include_router(whisper_knowledge_graphs, prefix="/knowledge_graph", tags=["Knowledge Graph"])
api_router.include_router(websockets_chats, prefix="/websockets", tags=["Websockets Chat"])
api_router.include_router(whisper_clouds, prefix="/clouds", tags=["Clouds"])
api_router.include_router(activity_tracking_n_recommendations, prefix="/activity-tracking-n-recommendations",
tags=["Activity Tracking & Recommendations"])
|
CloudWhisperCustomBot | app/web/profiles/schemas.py | from typing import Optional, Dict
from pydantic import BaseModel
from enum import Enum
class UpdateAppearanceRequest(BaseModel):
appearance: Optional[Dict] = None
class OnboardingStatus(str, Enum):
app_tour = "app_tour"
action_tour = "action_tour"
onboarded = "onboarded"
class UpdateOnboardingStatusRequest(BaseModel):
onboarding_status: OnboardingStatus
profile_id: str
|
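For reference, a minimal sketch of validating an onboarding payload against the models above; the profile id is a placeholder value.

```python
# Illustrative sketch only: validating a payload with the Pydantic models above.
from app.web.profiles.schemas import OnboardingStatus, UpdateOnboardingStatusRequest

req = UpdateOnboardingStatusRequest(onboarding_status="app_tour", profile_id="profile-123")
assert req.onboarding_status is OnboardingStatus.app_tour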
CloudWhisperCustomBot | app/web/profiles/__init__.py | from .api import whisper_profiles, router
__all__ = ["whisper_profiles", "router"]
|
CloudWhisperCustomBot | app/web/profiles/api.py | from http import HTTPStatus
import httpx
from loguru import logger
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import JSONResponse
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
from app import models
from app.api_discovery.utils import update_profile_with_vpcplus_api_key, decrypt_api_key
from app.web.common.utils import update_appearance_in_db
from app.web.profiles.schemas import UpdateAppearanceRequest, UpdateOnboardingStatusRequest
from app.core.config import settings
from app.web.common import db_deps, deps
whisper_profiles = APIRouter()
router = APIRouter()
@whisper_profiles.post("/api-key", name="Add or Update VPC+ API Key")
async def add_api_key(
api_key: str,
user=Depends(deps.authenticate_user)
):
headers = {"API-KEY": api_key}
async with db_deps.get_db_session_async_context() as db_session:
with httpx.Client() as http_client:
resp = http_client.get(f"{settings.web.AUTH_LINK}/v1/users/api_key", headers=headers)
if not resp or resp.status_code == 401:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail={"error": f"Invalid or Expired API Key '{api_key}' found."}
)
api_key_json = resp.json()
api_key_name = api_key_json.get("name")
api_key_expiry = api_key_json.get("expires_at")
profile = await update_profile_with_vpcplus_api_key(
profile_id=user['id'],
api_key=api_key, api_key_name=api_key_name, api_key_expiry=api_key_expiry
)
api_key = profile.api_key
return JSONResponse(content={"name": api_key['name'] or '', "expires_at": api_key['expires_at'] or ''},
status_code=200)
@whisper_profiles.get("/api-key", name="Get VPC+ API Key details")
async def get_api_key(
user=Depends(deps.authenticate_user)
):
from app.models import Profile
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
if not profile:
raise HTTPException(status_code=404, detail=f"User with ID {user['id']} not found")
api_key = profile.api_key
if not api_key:
raise HTTPException(status_code=204, detail="API Key not found")
decrypted_api_key = decrypt_api_key(api_key.get('API-KEY') or '')
return JSONResponse(content={"name": api_key['name'] or '', "key": decrypted_api_key or '',
"expires_at": api_key['expires_at'] or '', "last_updated_at": profile.last_updated_at.isoformat() if profile.last_updated_at else None }, status_code=200)
@router.get("")
async def get_user_details(
profile=Depends(deps.authenticate_user),
):
return profile
@router.patch("/appearance")
async def update_user_appearance(
setting: UpdateAppearanceRequest,
profile=Depends(deps.authenticate_user)
):
async with db_deps.get_db_session_async_context() as db_session:
updated_profile = await update_appearance_in_db(
profile=profile,
appearance=setting.appearance
)
if not updated_profile:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail=f"No User Profile with id {profile['uuid']} found"
)
return updated_profile.to_json()
@whisper_profiles.post("/onboarding", name="Update Onboarding Status")
async def add_onboarding_status(
request: UpdateOnboardingStatusRequest,
profile=Depends(deps.authenticate_user)
):
profile_id = request.profile_id
onboarding_status = request.onboarding_status
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(models.Profile).filter(models.Profile.user_id == profile_id))).one_or_none()
if not profile:
raise HTTPException(status_code=404, detail=f"Profile not found with id {profile_id}")
profile.onboarding = onboarding_status
await db_session.commit()
logger.info(f"Updated profile with ID {profile_id} to onboarding status {profile.onboarding}.")
return {
"detail": "Onboarding status updated successfully.",
"profile": profile.to_reference_json()
}
|
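As a client-side illustration, a hedged sketch of calling the API-key endpoint above; the base URL, URL prefix, and auth header are assumptions about the deployment, and the key value is a placeholder.

```python
# Illustrative sketch only: attach a VPC+ API key to the authenticated profile.
# Base URL, URL prefix, and the auth header are deployment assumptions; values are placeholders.
import httpx

BASE_URL = "https://cloudwhisper-stage.wanclouds.ai/v1/whisper"  # assumed prefix

with httpx.Client(base_url=BASE_URL, headers={"Authorization": "Bearer <token>"}) as client:
    resp = client.post("/profiles/api-key", params={"api_key": "<vpcplus-api-key>"})
    resp.raise_for_status()
    print(resp.json())  # expected: {"name": ..., "expires_at": ...}
```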
CloudWhisperCustomBot | app/web/common/consts.py | CREATED_AT_FORMAT_WITH_MILLI_SECONDS = '%Y-%m-%dT%H:%M:%S.%fZ'
|
CloudWhisperCustomBot | app/web/common/templates.py | ROUTE_TEMPLATE = """You are a team member of the 'Cloud Whisperer' project by Wanclouds, an expert Cloud Support Engineer specializing in cloud backups, disaster recovery, and migrations. Your expertise covers major public clouds (IBM Cloud, AWS, Google Cloud, Microsoft Azure) and Wanclouds' offerings. You assist potential customers with insights, queries, and guidance on backup, disaster recovery, and migration setups across these platforms.
Your task is to analyze the user's latest query along with the chat history to select the appropriate tool(s) for handling the request.
<available-tools>
Below are the descriptions of the available tools. You must always use one of the listed tools.
1. QnA_or_Schedule_a_call: Use this tool for all general inquiries, questions, and requests for information regarding the VPC+ product, cloud services, or migration topics. This includes inquiries about migration alerts, requirements, procedures, or general datacenter updates. Additionally, use this tool when the user wants to schedule a call with the Wanclouds team.
Examples:
- "What is DRaaS?"
- "Tell me about your migration offerings."
- "How does cloud migration work?"
- "What are the benefits of migrating to the cloud?"
- "I received a notification about migrating from a specific datacenter, is this true?"
- "Do I need to migrate my resources from a particular IBM datacenter?"
- "What's the timeline for datacenter migrations?"
- "How do I initiate the migration process for my classic infrastructure?"
- "Can you help me move my workloads from the legacy datacenter to the new MZR?"
- "I want to migrate from dal09 dc, can you help me move my workloads"
- "I would like to schedule a call"
- "I want to setup a meeting with wanclouds team"
- "Can i discuss this over a call?"
- "I need a call to discuss my migration requirements"
- "How can i schedule a call with Wanclouds?"
2. Action: Use when the user intends to perform actions on the VPC+ product, such as creating, modifying, or deleting resources.
Examples:
- "Create a new VPC"
- "Delete my cloud account"
- "Modify my backup settings"
- "I want to create a one-time backup of my IBM VPC."
- "I want to set up scheduled backups for my IBM VPC."
- "I want to back up my Cloud Object Storage (COS) bucket once."
- "I want to schedule regular backups for my Cloud Object Storage (COS) bucket."
- "I want to restore a VSI backup in the same region and VPC."
- "I want to schedule backups for my IBM instance (VSI)."
- "I want to restore a backup for my Cloud Object Storage (COS) bucket."
- "I want to restore a backup for my IBM VPC."
- "I want to create a one-time backup for my IBM IKS cluster."
- "I want to set up scheduled backups for my IBM IKS clusters."
3. ClassicMigration: This tool SHOULD ONLY BE USED WHEN THE USER INTENDS TO PERFORM THE MIGRATION. This tool should not be used when the user is asking for general help or wants to schedule a call related to migration.
Examples:
- "I want to migrate my resources from dal09 to dal10"
- "I want to migrate my workloads from data center dal9"
4. DataExplorer: Use when retrieving or analyzing specific data from the user's VPC+ deployment across supported cloud platforms, including resources that are idle or need rightsizing.
Examples:
- "How many VPCs do I have?"
- "Show my cloud accounts"
- "show me my snapshots"
- "show me my resources that are not backed up"
- "show me the snapshots that are idle"
- What are my idle resources?
- Which services are costing me?
- I want a report in my monthly spending.
- What are the instance names that need to be rightsized?
- Show me my recommendations.
- What idle resource do I have?
- What is the total cloud spending for the analyzed month?
- How many cost optimization recommendations were identified?
- What is the potential savings amount if the recommendations are implemented?
- What are the top 5 services contributing to the cloud spending?
- Which service category accounts for the largest portion of the total spending?
- What percentage of total costs do Kubernetes and Bare Metal services combined represent?
- How does the current month's spending compare to the previous month?
- What is the percentage difference between the current month's spending and the 12-month average?
- What types of older resources continue to incur costs?
- What are the main recommendations for cost optimization?
- Which service has the highest percentage of total spending?
- What is the cost for Log Analysis services?
- What is the overall trend in cloud spending over the past year?
- What percentage of total spending is attributed to database services?
- How many months of data are considered in the 12-month average?
- What areas are suggested for potential cost savings without compromising operational efficiency?
- "Backup recommendations."
- "Give me a recommendation for backing up my infrastructure."
- "Recommend which resources need backup in my IBM cloud."
- "Show me my backup recommendations."
- "I need a backup suggestion for my IBM resources."
- "Can you identify any resources that should be backed up immediately?"
</available-tools>
<tool-selection-process>
Use these instructions for the Tool Selection Process:
1. Analyze the complete user query (chat history + latest query).
2. Identify key phrases or intentions that align with specific tools.
3. For ANY migration-related queries that involve questions, notifications, or general information, always use QnA_or_Schedule_a_call.
4. Consider the user's expertise level based on their language and questions.
5. If multiple tools seem applicable, prioritize based on the primary intention of the query.
6. For complex queries, consider breaking them down into subtasks and selecting tools for each.
7. Always select one tool from the tools provided to you.
</tool-selection-process>
<chat_history>
{chat_history}
</chat_history>
<user_query>
{query}
</user_query>
Please provide your response in the following format:
complete_user_query: [Combine the chat history and the user's latest query into a single standalone query that includes all the information the user has provided, written in the user's own voice as if the user is saying it]
Tool: [Select 'QnA_or_Schedule_a_call', 'DataExplorer', 'ClassicMigration', or 'Action' after determining the user's latest intent from the chat history and the user's latest response]
Explanation: [Provide a clear explanation for your tool selection, referencing specific parts of the user's query and chat history that led to your decision, including:
1. Specific keywords or phrases from the user's query that influenced your decision
2. How the selected tool(s) best address the user's primary intention
3. If applicable, why you chose one tool over another for similar use cases
4. How you considered the user's expertise level in your selection]
If the query is ambiguous or requires clarification, state this in your explanation and suggest what additional information would be helpful to make a more accurate tool selection."""
NARROW_DOWN_INTENT = """You are a team member of the 'Cloud Whisperer' project by Wanclouds, and your job is to detect intent from user queries. Your task is to classify the user's intent based on their query. Below are the possible intents with brief descriptions. Use these to accurately determine the user's goal, and output only the intent topic. When a user starts a conversation or asks a question, follow these steps:
<instructions>
1. Greet new users warmly to establish rapport if this is the user's first message and the chat history is empty.
2. Review the user's chat history and latest request thoroughly.
3. Determine if the request matches any specific actions in the "available actions" section.
4. If the request is unclear or could match multiple actions:
- Acknowledge any ambiguity politely.
- Engage with the user to gather more details.
- Present potential matching options from the actions list.
5. If the user's request seems to be outside the scope of the available actions:
- Politely inform the user that the requested action is not currently available.
- Offer to assist with any of the actions from the existing list.
6. Maintain clear, concise, and professional communication throughout.
7. When a single intent is found, set 'Task Finished' to true and do not ask any further questions.
8. Only set 'Task Finished' to true when a single, specific action is identified.
9. Ensure 'Task Finished' remains False if multiple actions could potentially match the user's intent, prompting further clarification.
10. When a request could match multiple actions, list all potential options and ask the user to specify.
11. Do not ask for additional details beyond what's needed to identify the action.
12. Before finalizing an intent, check the Coming Soon actions for any similar or related intents.
13. If a user's query could match both an available action and a Coming Soon action, do not narrow down to a single intent. Instead, list both possibilities and set 'Task Finished' to false.
14. When a user's query is ambiguous (e.g., doesn't specify between one-time or scheduled actions), do not assume. List all relevant intents and set 'Task Finished' to false.
15. Be cautious about inferring details not explicitly stated by the user. When in doubt, it's better to ask for clarification than to narrow down incorrectly.
</instructions>
<cloud_whisperer_introduction>
'Cloud Whisperer', by Wanclouds, is an expert Cloud Support Engineer specializing in cloud backups, disaster recovery, and migrations. Your expertise covers major public clouds (IBM Cloud, AWS, Google Cloud, Microsoft Azure) and Wanclouds' offerings. You assist potential customers with insights, queries, and guidance on backup, disaster recovery, and migration setups across these platforms.
</cloud_whisperer_introduction>
<actions>
Available actions:
1. IBM Classic or Legacy Data Center Migration: As part of the IBM Datacenter Modernization initiative, IBM is closing older and classic legacy datacenters and migrating customers to the new data centers or multi-zone regions (also called MZR). Dal09 or Dal9 or Dallas 9 is currently affected and customers will have to move to another datacenter or VPC. IBM Cloud has partnered with Wanclouds to assist with these migrations. Customers will need to migrate servers, including Bare Metal (physical) servers, virtual servers or machines (called VSIs or VMs), firewalls, and load balancers.
2. Create one-time IBM VPC backup: One-time backup of an IBM VPC blueprint, including all configurations and architecture details.
3. Create scheduled IBM VPC backup: Set up periodic backups of an IBM VPC blueprint, including all configurations and architecture details, with customizable backup policies.
4. Restore IBM VPC backup: Restore IBM VPC backups.
5. Create one-time Cloud Object Storage (COS) bucket backup: One-time backup of Cloud Object Storage (COS) buckets.
6. Create scheduled Cloud Object Storage (COS) bucket backup: Set up periodic backups for IBM COS buckets with customizable policies.
7. Restore Cloud Object Storage (COS) bucket backup: Restore IBM Cloud Object Storage (COS) bucket backups.
8. Create one-time IBM IKS cluster backup: One-time backup of IBM Kubernetes Service (IKS) clusters.
9. Create scheduled IBM IKS cluster backup: Set up periodic backups for IBM IKS clusters with customizable policies.
10. Create scheduled IBM instance (VSI) backup: Set up periodic backups for an IBM instance with policies.
11. Restore IBM VSI backup in same region and VPC: Restore IBM VSI backup in same region and VPC.
12. Restore IBM VSI backup in different region and VPC: Restore IBM VSI backup in different region and VPC
13. Restore IKS backup in an existing cluster: Restore IKS backup in an existing IKS cluster.
14. Restore IBM IKS Cluster backup in existing vpc: Restore IBM IKS Cluster backup in existing VPC.
</actions>
<coming soon actions>
Coming soon actions:
1. Restore VSI backup using custom template: Restore VSI backup using custom template meaning the user can restore VSI backup in different vpc in same or different region.
2. Create one-time IBM Virtual Server Instance (VSI) Backup: One-time backup of IBM Virtual Server instances.
</coming soon actions>
<chat_history>
{chat_history}
</chat_history>
<user_query>
{query}
</user_query>
Please strictly adhere to the following template format:
Thought: [Take a moment to relax and start analyzing the chat history and the latest user query. Determine which Actions and Coming Soon Actions the user's request matches. Do not make assumptions]
Intent Narrowed: [Action(s) from action list, coming soon list that can be narrowed down based on the user chat]
Task Analysis: [Analyze the "Intent Narrowed" section carefully: if more than one action is narrowed down, mark the task as false; if the only narrowed-down action is from the coming soon list, again mark the task as false; otherwise decide yourself]
Task Finished: [Mark it True only if the intent is narrowed down to a single action from the available actions list; otherwise False]
Response: {response}"""
CONFIDENCE_SCORING_BOT = """You are a confidence scoring assistant for intent classification, a team member of the 'Cloud Whisperer' project by Wanclouds. Your job is to analyze the conversation, the single narrowed-down intent provided by the intent narrowing bot, along with the intents list and coming soon actions. You will assign a confidence score based on how well the query and context match the intent.
<Instructions>
1. Carefully read the conversation, list of Intents, coming soon actions, and the narrowed-down intent.
2. Analyze the semantic similarity between the query, chat context, and the intent description.
3. Consider any specific keywords or phrases that strongly indicate the particular intent.
4. Assess how well the intent aligns with the overall conversation context.
5. Check if there are any similar or conflicting actions in the Coming Soon list.
6. Assign a confidence score between 0 and 100 for the narrowed-down intent.
7. Provide a brief explanation for your scoring.
8. Use the following confidence levels:
- High: 90-100
- Moderate: 70-89
- Low: 0-69
9. If there's a similar action in Coming Soon, reduce the confidence score appropriately and highlight this in your explanation.
10. If the user's query is ambiguous or could match multiple intents (including Coming Soon actions), assign a lower confidence score and recommend clarification.
</Instructions>
<cloud_whisperer_introduction>
'Cloud Whisperer', by Wanclouds, is an expert Cloud Support Engineer specializing in cloud backups, disaster recovery, and migrations. Your expertise covers major public clouds (IBM Cloud, AWS, Google Cloud, Microsoft Azure) and Wanclouds' offerings. You assist potential customers with insights, queries, and guidance on backup, disaster recovery, and migration setups across these platforms.
</cloud_whisperer_introduction>
<conversation>
Conversation:
{chat_history}
</conversation>
<intents_list>
List of Intents that were provided to narrowing down bot:
1. IBM Classic or Legacy Data Center Migration: As part of the IBM Datacenter Modernization initiative, IBM is closing older and classic legacy datacenters and migrating customers to the new data centers or multi-zone regions (also called MZR). Dal09 or Dal9 or Dallas 9 is currently affected and customers will have to move to another datacenter or VPC. IBM Cloud has partnered with Wanclouds to assist with these migrations. Customers will need to migrate servers, including Bare Metal (physical) servers, virtual servers or machines (called VSIs or VMs), firewalls, and load balancers.
2. Create one-time IBM VPC backup: One-time backup of an IBM VPC, including all configurations and resources.
3. Create scheduled IBM VPC backup: Set up periodic backups for an IBM VPC with customizable policies.
4. Restore IBM VPC backup: Restore IBM VPC backups.
5. Create one-time Cloud Object Storage (COS) bucket backup: One-time backup of Cloud Object Storage (COS) buckets.
6. Create scheduled Cloud Object Storage (COS) bucket backup: Set up periodic backups for IBM COS buckets with customizable policies.
7. Restore Cloud Object Storage (COS) bucket backup: Restore IBM Cloud Object Storage (COS) bucket backups.
8. Create one-time IBM IKS cluster backup: One-time backup of IBM Kubernetes Service (IKS) clusters.
9. Create scheduled IBM IKS cluster backup: Set up periodic backups for IBM IKS clusters with customizable policies.
10. Create scheduled IBM instance (VSI) backup: Set up periodic backups for an IBM instance with policies.
11. Restore IBM VSI backup in same region and VPC: Restore IBM VSI backup in same region and VPC.
12. Restore IBM VSI backup in different region and VPC: Restore IBM VSI backup in different region and VPC
13. Restore IKS backup in an existing cluster: Restore IKS backup in an existing IKS cluster.
14. Restore IBM IKS Cluster backup in existing vpc: Restore IBM IKS Cluster backup in existing VPC.
</intents_list>
<coming soon actions>
Coming soon actions:
1. Restore VSI backup using custom template: Restore VSI backup using custom template meaning the user can restore VSI backup in different vpc in same or different region.
2. Create one-time IBM Virtual Server Instance (VSI) Backup: One-time backup of IBM Virtual Server instances.
</coming soon actions>
<intent_narrowed_down>
Narrowed Intent: {narrowed_intent}
</intent_narrowed_down>
Please respond in the following format:
Analysis: [Take a moment to relax and start carefully analyzing the conversation, intent narrowed, intent list and coming soon actions. Highlight any ambiguities or conflicts with Coming Soon actions. If the user's query is ambiguous or could match multiple intents like (Restore VPC Backup & Restore IKS Cluster in existing VPC), assign a lower confidence score and recommend clarification]
Confidence Score: [Score] - [Brief explanation, including any impact from Coming Soon actions]
Overall Confidence: [High/Moderate/Low]
Recommendation: [Whether to proceed without confirmation, use implicit confirmation, ask for explicit confirmation, or seek clarification on specific points]"""
NARROW_DOWN_MIGRATION_INTENT = """
You are a team member of the 'Cloud Whisperer' project by Wanclouds, and your job is to detect intent from user queries. Your task is to classify the user's intent based on their query. Below are the possible intents with brief descriptions. Use these to accurately determine the user's goal, and output only the intent topic. When a user starts a conversation or asks a question, follow these steps:
<instructions>
1. Greet new users warmly to establish rapport if this is the user's first message and the chat history is empty.
2. Review the user's chat history and latest request thoroughly.
3. Determine if the request matches any specific actions in the "available actions" section.
4. If the request is unclear or could match multiple actions:
- Acknowledge any ambiguity politely.
- Engage with the user to gather more details.
- Present potential matching options from the actions list.
5. If the user's request seems to be outside the scope of the available actions:
- Politely inform the user that the requested action is not currently available.
- Offer to assist with any of the actions from the existing list.
6. Maintain clear, concise, and professional communication throughout.
7. When a single intent is found, set 'Task Finished' to true and do not ask any further questions.
8. Only set 'Task Finished' to true when a single, specific action is identified.
9. Ensure 'Task Finished' remains False if multiple actions could potentially match the user's intent, prompting further clarification.
10. When a request could match multiple actions, list all potential options and ask the user to specify.
11. Do not ask for additional details beyond what's needed to identify the action.
</instructions>
<cloud_whisperer_introduction>
'Cloud Whisperer', by Wanclouds, is an expert Cloud Support Engineer specializing in cloud backups, disaster recovery, and migrations. Your expertise covers major public clouds (IBM Cloud, AWS, Google Cloud, Microsoft Azure) and Wanclouds' offerings. You assist potential customers with insights, queries, and guidance on backup, disaster recovery, and migration setups across these platforms.
</cloud_whisperer_introduction>
<actions>
Available actions:
1. START IBM Classic or Legacy Data Center Migration: With this action the user can start the pre-migration step of selecting their workloads and then schedule a call with the Wanclouds Migration Team.
2. Schedule a meeting for IBM Classic or Legacy Data Center Migration: With this action the user can directly schedule a meeting and discuss everything related to the migration on a call with the Wanclouds Migration Team.
</actions>
<examples>
Suppose the Actions are:
1. Enable email notifications: This Action enables email notifications for your alerts. As soon as there is an alert, you'll get an email.
2. Enable SMS notifications: This Action enables SMS notifications for your alerts. As soon as there is an alert, you'll get an SMS.
3. Disable email notifications: This Action disables email notifications for your alerts.
4. Disable SMS notifications: This Action disables SMS notifications for your alerts.
and coming soon actions are
1. change email password
<example0>
<chat_history>
</chat_history>
<user_query>
I want to change my email password.
</user_query>
You should respond as:
Thought: This is the user's first message and chat history is empty, so a greeting is appropriate. The user query doesn't match any existing actions in the available list. It is categorized under "Coming Soon" actions.
Intent Narrowed:
- Change Email Password
Task Analysis: This task is currently under development and is not available at the moment.
Task Finished: False
Response: Hello and welcome! I'm here to assist you. Regarding your request to change your email password, this feature is currently being developed and will be available soon. Please check back later for updates. In the meantime, if you need any assistance with other features, feel free to ask!
<example1>
<chat_history>
</chat_history>
<user_query>
I want to turn on notifications.
</user_query>
You should respond as:
Thought: This is the user's first message and chat history is empty, so a greeting is appropriate. The user wants to enable notifications, but it's not clear if they want email or SMS notifications.
Intent Narrowed:
- Enable email notifications
- Enable SMS notifications
Task Analysis: The task is not finished as we have more than one intent
Task Finished: False
Response: Hello and welcome! I'm here to help you with turning on notifications. Could you please specify whether you'd like to enable email notifications or SMS notifications?
<example2>
<chat_history>
<user>I want to turn on notifications.</user>
<assistant>Hello and welcome! I'm here to help you with turning on notifications. Could you please specify whether you'd like to enable email notifications or SMS notifications?</assistant>
</chat_history>
<user_query>
I prefer email notifications.
</user_query>
You should respond as:
Thought: The user has clarified that they want to enable email notifications. This is not their first message, so no greeting is needed.
Intent Narrowed: Enable email notifications
Task Analysis: The task can now be marked as finished as there is a single intent in the Narrow Down section
Task Finished: True
Response: Sure, I can help you with that.
<example3>
<chat_history>
<user>How do I manage my account settings?</user>
<assistant>I'd be happy to help you with managing your account settings. Could you please specify which particular setting you'd like to adjust? For example, we can help with notification preferences.</assistant>
<user>Oh, I see. In that case, I'd like to adjust my notifications.</user>
<assistant>Certainly! I can help you with adjusting your notifications. Would you like to enable or disable notifications? And do you prefer email or SMS notifications?</assistant>
</chat_history>
<user_query>
Hello, I want to turn on notifications. Turn on notifications for sms service
</user_query>
You should respond as:
Thought: The user wants to enable SMS notifications. This is not their first message, so no greeting is needed.
Intent Narrowed: Enable SMS notifications
Task Analysis: The task can now be marked as finished as there is a single intent in the Narrow Down section
Task Finished: True
Response: Certainly, I can help you enable SMS notifications for your alerts.
<example4>
<chat_history>
<user>Tell me about your product</user>
<assistant>Hello, I'm your support agent for ABC product....</assistant>
</chat_history>
<user_query>
Can you send an email to Ali that you need to attend a meeting with customer
</user_query>
You should respond as:
Thought: The user wants to send an email which is not in actionslist or coming soon, so it's out of context. This is not their first message, so no greeting is needed.
Intent Narrowed: []
Task Analysis: Task should be marked as False as user query is out of context
Task Finished: False
Response: I apologize, but I'm not able to send emails on your behalf. My capabilities are limited to helping with specific actions related to our product, such as managing notifications. Is there anything related to our product that I can assist you with?
</example4>
</examples>
<chat_history>
{chat_history}
</chat_history>
<user_query>
{query}
</user_query>
Please strictly adhere to the following template format:
Thought: [Analyze the chat history and the latest user query in relation to the Actions/ComingSoon. Do not make assumptions]
Intent Narrowed: [Action(s) from action list that are narrowed down based on the user chat]
Task Analysis: [Analyze "Intent Narrowed" section carefully]
Task Finished: [Mark it True only if the intent is narrowed down to a single action from the available actions list; otherwise False]
Response: [Your response to the user, including any necessary questions or confirmations]
"""
KNOWLEDGE_GRAPH_PROMPT = """ You are a helpful assistant tasked with extracting 'NEW' Nodes and relationships from
the Open API spec of API responses for the neo4j knowledge graph. It's crucial to avoid duplicates from the existing
nodes.
###Example1:
```
"Existing Nodes":["CarVault": "A mix of cars gathered from our database"]
*The existing node's description is the same as vehicleCollection, which is why it was not included in the list of nodes in the response.
input :"200": {
"content": {
"application/json": {
"schema": {
"properties": {
"vehicleCollection": {
"type": "array",
"items": {
"type": "object",
"properties": {
"vehicleId": {
"type": "string",
"description": "Unique identifier for the vehicle."
},
"make": {
"type": "string",
"description": "Manufacturer of the vehicle."
},
"model": {
"type": "string",
"description": "Model of the vehicle."
},
"year": {
"type": "integer",
"description": "Manufacturing year of the vehicle."
},
"owner": {
"type": "object",
"properties": {
"userId": {
"type": "string",
"description": "Identifier for the owner of the vehicle."
},
"name": {
"type": "string",
"description": "Name of the vehicle owner."
},
"contactDetails": {
"type": "object",
"properties": {
"emailAddress": {
"type": "string",
"description": "Email address of the owner."
},
"phoneNumber": {
"type": "string",
"description": "Phone number of the owner."
}
},
"required": ["emailAddress", "phoneNumber"]
}
},
"required": ["userId", "name", "contactDetails"]
}
},
"required": ["vehicleId", "make", "model", "year", "owner"]
}
},
"pagination": {
"type": "object",
"properties": {
"current": {
"type": "integer",
"description": "Current page of the vehicle collection."
},
"perPage": {
"type": "integer",
"description": "Number of vehicle records per page."
},
"total": {
"type": "integer",
"description": "Total number of vehicle records available."
}
},
"required": ["current", "perPage", "total"]
}
},
"required": ["vehicleCollection", "pagination"]
}
}
},
"description": "Successful response"
}
}
Response should be:
"Nodes": {
"Vehicle": "A single vehicle, typically associated with an owner.",
"Owner": "The individual or entity that legally possesses the vehicle.",
"ContactDetails": "Contact information of the vehicle owner, including email and phone number.",
"Pagination": "Information about the pagination of the vehicle list."
},
"Relationships": [
{"source": "Vehicle", "target": "Owner", "relationship": "OWNED_BY"},
{"source": "Owner", "target": "ContactDetails", "relationship": "HAS_CONTACT_INFO"}
],
"Location": {
"VehicleCollection": "vehicleCollection[*]",
"Vehicle": "vehicleCollection[*]",
"Owner": "vehicleCollection[*].owner",
"ContactDetails": "vehicleCollection[*].owner.contactDetails",
"Pagination": "pagination"
},
"Attribute": {
"Vehicle": ["vehicleId","make","model","year"]
"Owner": ["userId","name"]
"ContactDetails": ["emailAddress","PhoneNumber"]
"Pagination": ["current","perPage","total"]
}
}
```
"Existing Nodes": {nodes}
Instructions:
1. Begin by analyzing previously used nodes and successful responses in the API spec.
2. Examine existing node descriptions. If the node already exists, utilize it without creating a duplicate in your
response.
3. Do not create any alias nodes in your response that already exist in "Existing Nodes".
4. Only append new nodes found in the API spec.
5. Ensure CamelCase naming convention for new nodes.
6. Do not omit any nodes.
7. It's crucial to ensure that the descriptions for each parameter highlight their intrinsic properties rather than
just their relation to the resources.
Response Format:
- Nodes: [Python dictionary listing all significant nodes in the Open API spec response payload with their brief
descriptions. Do not omit any node.]
- Relationships: [Python List containing all meaningful relationships in the Open API specs. Use dictionaries with
'source', 'target', and 'relationship' keys.]
- Location: {Python dictionary mapping nodes to their locations in the JSON structure of the API's successful
response schema. Use '[*]' if the location is a list. Separate location keys with '.'}
- Attribute: {Python dictionary associating nodes with their attribute keys.}
Failure to include any node, relationship, or location will result in missing connections in the data.
"""
VPC_COST_OPTIMIZATION_TEMPLATE = """
The tool response has returned JSON data containing information about:
1) Cloud service cost: per service cost details of the selected cloud account
2) Cost trends: actions taken from the recommendation and cost saved from those recommendations
<tool_response>
<cost-by-services-json>
Here is the cloud service cost json:
{service_cost}
</cost-by-services-json>
<cost-trends>
Here are the cost trends from the last 12 months:
{cost_trend_payload}
</cost-trends>
</tool_response>
Now take a moment to relax. Understand the user's question and find the required answer in the response returned above."""
SOFTLAYER_INFRA_ASSESSMENT_PROMPT = """
The tool response has returned JSON data containing information about:
1) IBM Softlayer Cloud Resources assessment data: includes high-level assessments of the classic infrastructure and identifies potential cost optimization opportunities for migration to IBM Cloud VPC.
Provided IBM Softlayer Infrastructure Data:
<ibm-softlayer-cloud-resources-assessment>
The report should be based on the following IBM Softlayer Cloud Resources assessment data:
{ibm_softlayer_cloud_payload}
</ibm-softlayer-cloud-resources-assessment>
Based on the user query '{query}', analyze the JSON data above and generate a response accordingly.
Important: All necessary data has already been provided, so please proceed with analyzing and generating the report without requesting further details.
"""
|
CloudWhisperCustomBot | app/web/common/utils.py | import aiohttp
import asyncio
import httpx
import json
from fastapi import HTTPException
from loguru import logger
from sqlalchemy import select, update
from sqlalchemy.orm import selectinload
from app import models
from app.api_discovery.utils import update_profile_with_vpcplus_api_key
from app.core.config import settings
from app.web import activity_tracking
from app.web.common import db_deps
from app.models.activity_tracking import ActivityTracking
async def api_key_msg_event_generator(api_key_msg, chat_id):
from app.models import Message, Chat
lines = api_key_msg.split("\n")
assistant_message = ""
for line in lines:
chunk = line
assistant_message += chunk + "\n"
yield {"event": "message", "data": line}
async with db_deps.get_db_session_async_context() as db_session:
chat = (await db_session.scalars(select(Chat).filter(Chat.id == chat_id))).one_or_none()
chat_message = Message(msg_type=Message.TYPE_ASSISTANT, content=assistant_message, msg_category=Message.TYPE_QNA)
chat_message.chat = chat
db_session.add(chat_message)
await db_session.commit()
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
async def user_msg_event_generator(response_for_user, chat_id):
from app.models import Message, Chat
lines = response_for_user.split("\n")
assistant_message = ""
for line in lines:
chunk = line.lstrip()
assistant_message += chunk + "\n"
yield {"event": "message", "data": line}
async with db_deps.get_db_session_async_context() as db_client:
chat = (await db_client.scalars(select(Chat).filter(Chat.id == chat_id))).one_or_none()
chat_message = Message(msg_type=Message.TYPE_ASSISTANT, content=assistant_message, msg_category=Message.TYPE_QNA)
chat_message.chat = chat
db_client.add(chat_message)
await db_client.commit()
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
async def fetch_and_update_vpcplus_api_key(authorization, user_id):
headers = {"Authorization": f"Bearer {authorization.credentials}"}
try:
async with httpx.AsyncClient(timeout=60.0) as http_client:
resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/api_key", headers=headers)
if resp.status_code == 200:
api_key_json = resp.json()
api_key = api_key_json.get("key")
api_key_name = api_key_json.get("name")
api_key_expiry = api_key_json.get("expires_at")
async with db_deps.get_db_session_async_context() as session:
await update_profile_with_vpcplus_api_key(
profile_id=user_id,
api_key=api_key, api_key_name=api_key_name,
api_key_expiry=api_key_expiry if api_key_expiry else None
)
else:
logger.error(f"Failed to fetch API key: {resp.status_code} {resp.text}")
except httpx.HTTPStatusError as e:
logger.error(f"HTTP error occurred: {str(e)}")
    except httpx.TimeoutException as e:
        logger.error(f"Request timed out: {str(e)}")
    except httpx.RequestError as e:
        logger.error(f"Request error occurred: {str(e)}")
except Exception as e:
logger.error(f"An unexpected error occurred: {str(e)}")
async def update_appearance_in_db(profile, appearance):
async with db_deps.get_db_session_async_context() as db_session:
query = select(models.Profile).options(selectinload(models.Profile.chats)).filter_by(id=profile['uuid'])
result = await db_session.execute(query)
profile_setting = result.scalars().first()
if profile_setting is None:
profile_setting = models.Profile(appearance=appearance, user_id=profile["id"], name=profile["name"],
email=profile["email"])
profile_setting.profile_id = profile['uuid']
db_session.add(profile_setting)
if appearance is not None:
profile_setting.appearance = appearance
await db_session.commit()
return profile_setting
async def update_activity_tracking(activity_response, chat_id, action_id, user_id=None):
import json
logger.info(f"Activity Response: {activity_response}")
# Parse the activity_response string into a dictionary
try:
activity_response = json.loads(activity_response)
except json.JSONDecodeError as e:
logger.error(f"Failed to parse activity_response: {e}")
return
try:
async with db_deps.get_db_session_async_context() as db_session:
result = await db_session.scalars(select(models.Profile).filter(models.Profile.user_id == user_id))
profile = result.one_or_none()
try:
if activity_response.get("reporting_type") == "WorkflowsWorkspace":
resource_type = activity_response.get("fe_request_data").get("resource_type")
resource_name = activity_response.get("fe_request_data").get("backup_name")
status = activity_response["status"]
activity_type = activity_response.get("workspace_type")
if activity_response.get("workspace_type") == "TYPE_RESTORE":
activity_type = "RESTORE"
activity_tracking = ActivityTracking(
workflow_id=activity_response["id"],
user_id=user_id,
resource_name=resource_name,
fe_request_data=activity_response.get("fe_request_data"),
resource_type=resource_type,
activity_type=activity_type,
created_at=activity_response["created_at"],
summary=f"Restoration of {resource_name} is {status}",
email=profile.email,
status=status,
started_at=activity_response.get("started_at"),
completed_at=activity_response.get("completed_at"),
action_id=action_id,
chat_id=chat_id
)
else:
activity_type = activity_response["workflow_nature"]
resource_type = activity_response["resource_type"]
if resource_type == "IBMKubernetesCluster" and activity_type == "CREATE":
activity_type = "RESTORE"
else:
activity_type = activity_response["workflow_nature"]
activity_tracking = ActivityTracking(
workflow_id=activity_response["id"],
user_id=user_id,
resource_name=activity_response["workflow_name"],
fe_request_data=activity_response.get("fe_request_data"),
resource_type=activity_response["resource_type"],
activity_type=activity_type,
created_at=activity_response["created_at"],
summary=activity_response["summary"],
email=activity_response.get("email") or profile.email,
status=activity_response["status"],
started_at=activity_response.get("started_at"),
completed_at=activity_response.get("completed_at"),
action_id=action_id,
chat_id=chat_id
)
except KeyError as e:
logger.error(f"Missing key in activity_response: {e}")
return
except Exception as e:
logger.error(f"Error while constructing activity_tracking: {e}")
return
activity_tracking.profile = profile
db_session.add(activity_tracking)
await db_session.commit()
logger.info("Activity Tracked Successfully")
except Exception as e:
logger.error(f"Unexpected error in update_activity_tracking: {e}")
async def update_activity_status(activity_response, activity_id):
from app.models import ActivityTracking
logger.info(f"Activity Response: {activity_response}")
async with db_deps.get_db_session_async_context() as db_session:
activity = (await db_session.scalars(
select(models.ActivityTracking)
.filter(models.ActivityTracking.id == activity_id)
)).one_or_none()
if not activity:
return
status = activity_response["status"]
activity_query = await db_session.execute(
update(ActivityTracking)
.where(ActivityTracking.id == activity_id)
.values(status=status, completed_at=activity_response.get('completed_at'),
summary=activity_response.get("summary") or f"Restoration of {activity.resource_name} is {status}")
.returning(ActivityTracking)
)
updated_activity = activity_query.scalars().first()
await db_session.commit()
return updated_activity
# Helper class wrapping an aiohttp session to make requests and translate errors into HTTPExceptions
class HttpRequestHandler:
def __init__(self, session):
self.session = session
async def post(self, url, headers):
try:
async with self.session.post(url, headers=headers) as response:
if response.status != 202:
raise HTTPException(status_code=response.status, detail=f"Error: {await response.text()}")
return await response.json()
except aiohttp.ClientError as e:
raise HTTPException(status_code=500, detail=f"HTTP request failed: {str(e)}")
async def get(self, url, headers):
try:
async with self.session.get(url, headers=headers) as response:
if response.status != 200:
raise HTTPException(status_code=response.status, detail=f"Error: {await response.text()}")
return await response.json()
except aiohttp.ClientError as e:
raise HTTPException(status_code=500, detail=f"HTTP request failed: {str(e)}")
# Async function to start the workflow
async def start_workflow(headers, cloud_account_id, session):
COST_OPTIMIZATION_REPORT_URL = settings.web.AUTH_LINK + f"/v1/softlayer/recommendations/{cloud_account_id}"
# Make POST request to start the workflow
handler = HttpRequestHandler(session)
response = await handler.post(COST_OPTIMIZATION_REPORT_URL, headers)
# Extract and return workflow ID
workflow_id = response.get("id")
if not workflow_id:
raise HTTPException(status_code=404, detail="Workflow ID not found in response.")
return workflow_id
# Async function to poll workflow status with retry mechanism and timeout
async def poll_workflow_status(workflow_id, headers, session, max_poll_time=60, polling_interval=5, max_retries=5):
WORKFLOW_STATUS_URL = settings.web.AUTH_LINK + f"/v1/ibm/workflows/{workflow_id}"
handler = HttpRequestHandler(session)
retries = 0
while retries <= max_retries:
try:
# Poll workflow status
workflow_data = await handler.get(WORKFLOW_STATUS_URL, headers)
# Check if workflow is completed
if workflow_data.get("status") == "COMPLETED_SUCCESSFULLY":
return workflow_data.get("resource_json", {})
except HTTPException as e:
# Retry logic with exponential backoff
retries += 1
if retries > max_retries:
raise HTTPException(status_code=500, detail=f"Failed after {max_retries} retries: {str(e)}")
await asyncio.sleep(2 ** retries) # Exponential backoff
# Wait for polling interval
await asyncio.sleep(polling_interval)
# If the polling timed out, return in-progress status
return {"status": "in progress", "message": "Workflow is still running. Check back later.",
"workflow_id": workflow_id}
# Main function to execute workflow and retrieve cost response
async def get_softlayer_cloud_cost_response(headers, cloud_account_id, max_poll_time=60, polling_interval=5,
max_retries=5):
async with aiohttp.ClientSession() as session:
try:
# Step 1: Start the workflow
workflow_id = await start_workflow(headers, cloud_account_id, session)
# Step 2: Poll for workflow completion
workflow_result = await asyncio.wait_for(
poll_workflow_status(workflow_id, headers, session, max_poll_time=max_poll_time,
polling_interval=polling_interval, max_retries=max_retries),
timeout=max_poll_time
)
# Return workflow result
return workflow_result
except asyncio.TimeoutError:
return {"status": "in progress", "message": "Workflow polling timed out.", "workflow_id": workflow_id}
except HTTPException as e:
raise HTTPException(status_code=500, detail=f"Error during workflow execution: {str(e)}")
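# Usage sketch (illustrative assumption): fetching the SoftLayer cost optimization report for a
# cloud account; the bearer token header mirrors the other helpers in this module.
#
#   headers = {"Authorization": f"Bearer {token}"}
#   result = await get_softlayer_cloud_cost_response(headers, cloud_account_id)
#   if result.get("status") == "in progress":
#       # Workflow still running; poll again later using result["workflow_id"].
#       ...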
|
CloudWhisperCustomBot | app/web/common/chats_websockets_utils.py | from types import AsyncGeneratorType
import aiohttp
import types
import asyncio
import httpx
import json
import re
from datetime import datetime
from fastapi import HTTPException
from fastapi.security import HTTPAuthorizationCredentials
from loguru import logger
from sqlalchemy import asc
from sqlalchemy.future import select
from sqlalchemy.orm import selectinload
from sse_starlette import EventSourceResponse
from app.core.config import settings
from app.web.common import db_deps
from app.web.common.cloud_setup_instruction_messages import (IBM_CLOUD_ACCOUNT_MESSAGE, GENERAL_CLOUD_ACCOUNT_MESSAGE,
IBM_CLASSIC_CLOUD_ACCOUNT_MESSAGE)
from app.web.common.templates import (ROUTE_TEMPLATE, NARROW_DOWN_INTENT, NARROW_DOWN_MIGRATION_INTENT,
CONFIDENCE_SCORING_BOT)
from app.web.common.utils import api_key_msg_event_generator
from app.whisper.consts import WHISPER_USER_ROLE, WHISPER_ASSISTANT_ROLE
from app.whisper.llms.anthropic import AnthropicLLM
from app.whisper.utils.action_engine import ActionPhaseClaude, ComplexActionPhaseClaude
from app.whisper.utils.migration_action_engine import MigrationActionPhaseClaude
async def process_routing(chat_id):
"""
    Determines, from the chat history, which tool the latest user query should be routed to.
    Parameters:
    chat_id (str): ID of the chat whose messages are loaded from the database. Each message is a dictionary of
    message type and content, like [{'type': 'Human', 'text': 'content'}, {'type': 'assistant', 'text': 'content'}]
    Returns:
    tuple: (standalone user query, tool name), where the tool is one of: Action, QnA_or_Schedule_a_call, InformationRetrieval, ClassicMigration
"""
from app.models import Message
try:
async with db_deps.get_db_session_async_context() as db_session:
messages_obj = (await db_session.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
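            # Trim oldest messages so the 5-message window sliced below does not begin with an assistant turn.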
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-6:] # limit chat history to last 5 messages
query = chat_json_last_5[-1]['text'] # query that user just asked
chat_history_str = ''
for chat_ in chat_json_last_5[:-1]: # don't add user latest query in chat history
chat_history_str += f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n"
client = AnthropicLLM()
prompt = ROUTE_TEMPLATE.format(query=query, chat_history=chat_history_str.strip())
response = ''
feedback_sent = False
for attempt in range(2):
if not feedback_sent:
client.add_message(role=WHISPER_USER_ROLE, content=prompt)
try:
async for text in client.process_stream():
response += text
if "complete_user_query:" in response:
if "Tool: Action" in response:
user_query = response.split("Tool: Action")[0]
user_query = user_query.split("complete_user_query:")[1].strip()
logger.info(response)
return user_query, "Action"
elif "Tool: QnA_or_Schedule_a_call" in response:
user_query = response.split("Tool: QnA_or_Schedule_a_call")[0]
user_query = user_query.split("complete_user_query:")[1].strip()
logger.info(response)
return user_query, "QnA_or_Schedule_a_call"
elif "Tool: DataExplorer" in response:
user_query = response.split("Tool: DataExplorer")[0]
user_query = user_query.split("complete_user_query:")[1].strip()
logger.info(response)
return user_query, "InformationRetrieval"
elif "Tool: ClassicMigration" in response:
user_query = response.split("Tool: ClassicMigration")[0]
user_query = user_query.split("complete_user_query:")[1].strip()
logger.info(response)
return user_query, "ClassicMigration"
if attempt == 0:
logger.info("Retrying with feedback...")
                    feedback = "Internal feedback: The response you generated seems to be in an incorrect format. Please review the response and ensure it adheres to the expected format, such as 'Tool: Action', 'Tool: QnA_or_Schedule_a_call', 'Tool: ClassicMigration' or 'Tool: DataExplorer'. Additionally, the response should contain 'complete_user_query' with the full query entered by the user."
client.add_message(role=WHISPER_ASSISTANT_ROLE, content=feedback)
feedback_sent = True
continue
else:
logger.warning(f"Retry failed. Defaulting to QnA. Response: {response}")
return query, "QnA_or_Schedule_a_call"
except Exception as e:
logger.error(f"Unexpected error during response processing: {e}")
return None, None
except Exception as e:
logger.error(f"An error occurred while processing routing: {e}")
return None, None
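# Usage sketch (assumption): process_routing returns a (standalone_query, tool) pair that the chat
# handler can branch on, e.g.:
#
#   standalone_query, tool = await process_routing(chat_id)
#   if tool == "InformationRetrieval":
#       ...  # route to get_information_from_db
#   elif tool == "QnA_or_Schedule_a_call":
#       ...  # route to execute_qna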
async def execute_qna(user_name, question, chat_id):
from app.whisper.utils.qna_bot.base import QnABot
from app.models import Message
error = None
import traceback
try:
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
chat = messages_obj[0].chat
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-7:-1] # limit chat history to last 5 messages
retrieval_client = QnABot(chat_history=chat_json_last_5, user_name=user_name)
response = await retrieval_client.start(question)
response = format_response(response)
if not response.strip():
raise ValueError("Received an empty response from the assistant")
yield {"event": "message", "data": response}
chat_message = Message(msg_type=Message.TYPE_ASSISTANT, content=response, msg_category=Message.TYPE_QNA)
chat_message.chat = chat
db_client.add(chat_message)
await db_client.commit()
logger.info(response)
except ValueError as e:
logger.error(f"An error occurred in get_information_from_db: {str(e)}")
error = {"event": "error", "data": json.dumps({"detail": str(e)})}
except Exception as e:
logger.info(e)
logger.error(f"An error occurred while retrieving information: {traceback.format_exc()}")
error = {"event": "error", "data": json.dumps({"detail": "Internal server error"})}
finally:
if error:
yield error
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
yield {"event": "close"}
async def get_base_bot_response(payload):
headers = {"Content-Type": "application/json", "X-API-KEY": settings.base_bot.X_API_KEY}
timeout = aiohttp.ClientTimeout(total=50)
async with aiohttp.ClientSession(timeout=timeout) as session:
try:
async with session.post(settings.base_bot.BASE_BOT_URL, headers=headers, json=payload) as response:
if response.status != 200:
response_text = await response.text()
logger.error(f"Base Bot API Error: status code '{response.status}', response: {response_text}")
raise HTTPException(status_code=response.status, detail="Error contacting QnA API")
async for chunk in response.content.iter_any():
yield chunk.decode('utf-8')
except asyncio.TimeoutError:
logger.error("Timeout error while contacting Base Bot API")
raise HTTPException(status_code=504, detail="Timeout contacting QnA API")
async def narrow_down_intent(chat_id, user_dict, standalone_query, action_id=None, cloud_id=None):
from app.models import Message, Action
logger.info(f"Narrow down intent for chat_id: {chat_id}")
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
chat = messages_obj[0].chat
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-6:] # limit chat history to last 5 messages
query = chat_json_last_5[-1]['text'] # query that user just asked
chat_history_str = ''
for chat_ in chat_json_last_5[:-1]: # don't add user latest query in chat history
chat_history_str = (
chat_history_str + f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n")
logger.debug("<<<<<<<<<<<<<<<<<<<<<Chat History>>>>>>>>>>>>>>>>>>>>")
logger.debug(chat_history_str)
logger.debug("<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>")
start_time = datetime.now()
llm_client = AnthropicLLM()
# logger.info(NARROW_DOWN_INTENT.format(chat_history=chat_history_str.strip(), query=query))
logger.info(chat_json)
if len(chat_json) == 1:
        response = "[Craft a precise and professional response to the user as a support agent from Wanclouds. Greet the user as well, since this is the user's first message]"
llm_client.add_message(role=WHISPER_USER_ROLE,
content=NARROW_DOWN_INTENT.format(chat_history=chat_history_str.strip(), query=query,
response=response))
else:
response = "[Craft a precise and professional response to the user as a support agent from Wanclouds. Don't add greetings. Follow previous flow of conversation from chat history]"
llm_client.add_message(role=WHISPER_USER_ROLE,
content=NARROW_DOWN_INTENT.format(chat_history=chat_history_str.strip(), query=query,
response=response))
logger.info(llm_client.messages)
complete_response = ''
action = ''
stream_started = False
try:
async for chunk in llm_client.process_stream():
complete_response += chunk
if 'Task Finished: True' in complete_response:
action = complete_response.split('Intent Narrowed:')[1]
action = action.split('Task Analysis:')[0]
action = action.strip('\n').strip() if action.startswith('\n') else action
pattern = r'^\d+'
match = re.match(pattern, action)
if match:
action = action[match.end():]
logger.info(f"ACTION ---->{action}")
logger.info(f"{complete_response}")
break
if "Response:" in complete_response and not stream_started:
stream_started = True
buffer = complete_response.split("Response:", 1)[1] # Keep only the content after "Response:"
if buffer:
yield {"event": "message", "data": buffer}
continue
if stream_started:
if chunk.startswith('\n'):
yield {"event": "message", "data": "\n"}
if chunk.startswith('\n\n'):
yield {"event": "message", "data": "\n\n"}
yield {"event": "message", "data": chunk}
if chunk.endswith('\n'):
yield {"event": "message", "data": "\n"}
if chunk.endswith('\n\n'):
yield {"event": "message", "data": "\n\n"}
except Exception as e:
logger.error(f"Error during intent narrowing: {e}")
yield {"event": "error", "data": json.dumps({"detail": "Error during intent narrowing"})}
return
logger.info(complete_response)
if not complete_response.strip():
yield {"event": "error", "data": json.dumps({"detail": "Received an empty response from the assistant"})}
return
if action:
chat_history_str = ''
for chat_ in chat_json_last_5[:]: # don't add user latest query in chat history
chat_history_str = (
chat_history_str + f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n")
try:
confidence_scoring_bot = AnthropicLLM()
confidence_scoring_bot.add_message(role=WHISPER_USER_ROLE,
content=CONFIDENCE_SCORING_BOT.format(
chat_history=chat_history_str.strip(),
narrowed_intent=action))
confidence_response = confidence_scoring_bot.process()
logger.info(confidence_response)
confirmation, recommendation = False, None
if 'Overall Confidence: High' in confidence_response:
confirmation = False
else:
confirmation = True
recommendation = confidence_response.split('Recommendation:')[1]
except Exception as e:
logger.error(f"Error during intent narrowing: {e}")
yield {"event": "error", "data": json.dumps({"detail": "Error during intent narrowing"})}
return
action = action.lstrip("- ") if action.startswith("- ") else action
action_tool = similarity_search_api_desc(action)
messages_obj_metadata = json.dumps({
'searched_intent': action_tool,
'task_finished': False,
'stage': 1,
'history': []
})
if confirmation:
chat.confirmation_stage = True
llm_client.add_message(role='assistant', content=complete_response)
if len(chat_json) == 1:
llm_client.add_message(role='user',
                                   content=f'Internal Feedback: The confidence scoring bot, which analyzes the conversation history and the list of intents, provided this feedback: \n \"{recommendation}\" \n Add greetings: start with a polite greeting. Then, without mentioning the word intent or any internal processes, please ask the user to confirm their specific request or goal. Ensure you only seek confirmation of what they want to do, and do not collect any additional requirements or information. Remember, the user is not aware of our internal workings, so keep the language user-friendly and focused on their needs.')
else:
llm_client.add_message(role='user',
                                   content=f'Internal Feedback: The confidence scoring bot, which analyzes the conversation history and the list of intents, provided this feedback: \n \"{recommendation}\" \nNow, without mentioning the word intent or any internal processes, please ask the user to confirm their specific request or goal. Ensure you only seek confirmation of what they want to do, and do not collect any additional requirements or information. Remember, the user is not aware of our internal workings, so keep the language user-friendly and focused on their needs.')
llm_client.add_message(role='assistant', content='Response:')
complete_response = llm_client.process()
if 'Response:' in complete_response:
yield {"event": "message", "data": complete_response.split('Response:', 1)[1]}
else:
yield {"event": "message", "data": complete_response}
else:
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
messages_obj[-1].json_metadata = messages_obj_metadata
metadata = json.loads(messages_obj[-1].json_metadata)
metadata['initial_query'] = standalone_query
logger.info(metadata["searched_intent"][0])
action_obj = Action(name=metadata["searched_intent"][0], metadata=json.dumps(metadata))
action_id = action_obj.id
dummy_message_content = "This action is performed by another bot. You can start with a fresh conversation or continue with a new context."
dummy_message = Message(
msg_type=Message.TYPE_ASSISTANT,
content=dummy_message_content,
msg_category=Message.TYPE_ACTION,
is_visible=False,
)
dummy_message.chat = messages_obj[0].chat
db_client.add(dummy_message)
messages_obj[-1].action_id = action_obj.id
db_client.add(action_obj)
messages_obj[-1].msg_category = Message.TYPE_ACTION
await db_client.commit()
logger.info("hereeeee1")
yield {"event": "action", "data": json.dumps(action_obj.to_reference_json())}
logger.info(standalone_query)
stream = execute_stage_1(initial_query=standalone_query, user_dict=user_dict,
chat_id=chat_id, action_id=action_id, cloud_id=cloud_id)
async for chunk in stream:
yield chunk
logger.info(chunk)
return
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
yield {"event": "close"}
end_time = datetime.now()
if 'Response:' in complete_response:
response = complete_response.split('Response:')[1]
else:
response = complete_response
async with db_deps.get_db_session_async_context() as db_client:
chat_message = Message(msg_type=Message.TYPE_ASSISTANT, content=response, msg_category=Message.TYPE_QNA)
chat_message.chat = chat
db_client.add(chat_message)
await db_client.commit()
logger.debug("<<<<<<<<<<<<<<<<<<<<<Intent Phase Response>>>>>>>>>>>>>>>>>>>>")
logger.debug(complete_response)
logger.debug("<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>")
logger.info(f"Total Seconds=>{(end_time - start_time).total_seconds()}")
async def narrow_down_migration_intent(chat_id, user_dict, standalone_query, action_id=None):
from app.models import Message
logger.info(chat_id)
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
logger.info(messages_obj)
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-6:] # limit chat history to last 5 messages
query = chat_json_last_5[-1]['text'] # query that user just asked
chat_history_str = ''
for chat_ in chat_json_last_5[:-1]: # don't add user latest query in chat history
chat_history_str = (
chat_history_str + f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n")
logger.info("<<<<<<<<<<<<<<<<<<<<<Chat History>>>>>>>>>>>>>>>>>>>>")
logger.info(chat_history_str)
logger.info("<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>")
llm_client = AnthropicLLM()
llm_client.add_message(role=WHISPER_USER_ROLE,
content=NARROW_DOWN_MIGRATION_INTENT.format(chat_history=chat_history_str.strip(),
query=query))
complete_response = ''
response_content = ''
action = ''
complete_response = llm_client.process()
if 'Task Finished: True' in complete_response:
action = complete_response.split('Intent Narrowed:')[1]
action = action.split('Task Analysis:')[0]
action = action.strip('\n').strip() if action.startswith('\n') else action
pattern = r'^\d+'
match = re.match(pattern, action)
if match:
action = action[match.end():]
action = action.lstrip("- ") if action.startswith("- ") else action
else: # if "Response:" in complete_response:
response_content = complete_response.split("Response:", 1)[1] # Keep only the content after "Response:"
return action, response_content, complete_response
async def confirmation_bot(chat_id):
from app.models import Message
logger.info(chat_id)
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-6:] # limit chat history to last 5 messages
query = chat_json_last_5[-1]['text'] # query that user just asked
chat_history_str = ''
for chat_ in chat_json_last_5[:-1]: # don't add user latest query in chat history
chat_history_str = (
chat_history_str + f"<{chat_['type'].lower()}>: {chat_['text'].strip()}</{chat_['type'].lower()}>\n")
logger.debug("<<<<<<<<<<<<<<<<<<<<<Chat History>>>>>>>>>>>>>>>>>>>>")
logger.debug(chat_history_str)
logger.debug("<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>")
llm_client = AnthropicLLM()
if len(chat_json) == 1:
llm_client.add_message(role='user',
content=f'Internal Feedback: Add greetings, start with a polite greeting. Then, without mentioning the word intent or any internal processes, please ask the user to confirm their specific request or goal. Ensure you only seek confirmation of what they want to do, and do not collect any additional requirements or information. Remember, the user is not aware of our internal workings, so keep the language user-friendly and focused on their needs.')
else:
llm_client.add_message(role='user',
content=f'Internal Feedback: Now Without mentioning the word intent or any internal processes, please ask the user to confirm their specific request or goal. Ensure you only seek confirmation of what they want to do, and do not collect any additional requirements or information. Remember, the user is not aware of our internal workings, so keep the language user-friendly and focused on their needs.')
confirmation_tool = {
"name": "confirmation_tool",
"description": "This tool reviews users latest reply to confirmation of the values selected by a tool that has recently completed the payload. If the user confirms or agrees with the choices in the summary displayed, the Confirmation is true, but if user doesn't approve then summary then Confirmation is false."
"else False",
"input_schema": {
"type": "object",
"properties": {
"Confirmation": {
"type": "boolean",
"description": "Confirmation flag that is only True when user confirms the details and is ready to move forward. Analyze user last response. Don't get hallucinated by history"
},
"standalone_query":
{
"type": "string",
"description": "Analyze user latest query and chat history to create standalone query in tone like user is saying"
}
},
"required": [
"Confirmation",
"standalone_query"
]
}
}
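    # Note (descriptive, based on the parsing below): the tool-use response is expected to carry the
    # tool input at chat_response['content'][-1]['input'], containing the 'Confirmation' boolean and
    # the 'standalone_query' string defined in the schema above.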
chat_response = llm_client.process(
system="You are expert whose job is to analyze user current reply to confirmation and decide if user wants to proceed or not",
tools=[confirmation_tool],
force_tool=True,
tool_name="confirmation_tool")
logger.info("*" * 20)
logger.info(chat_response)
logger.info("*" * 20)
confirmation = chat_response['content'][-1]['input'].get("Confirmation")
standalone = chat_response['content'][-1]['input'].get('standalone_query')
if "Confirmation" not in chat_response['content'][-1]['input']:
llm_client.add_message(role=WHISPER_ASSISTANT_ROLE, content=chat_response['content'])
llm_client.add_message(role=WHISPER_USER_ROLE, content=[{
"type": "tool_result",
"tool_use_id": chat_response[-1]['id'],
"content": 'Please generate confirmation field',
"is_error": True
}])
chat_response = llm_client.process(
system="You are expert whose job is to analyze user current reply to confirmation and decide if user wants to proceed or not. Think step in step in <thinking> tags",
tools=[confirmation_tool],
force_tool=True,
tool_name="confirmation_tool")
confirmation = chat_response['content'][-1]['input']["Confirmation"]
standalone = chat_response['content'][-1]['input'].get('standalone_query') if chat_response['content'][-1][
'input'].get('standalone_query') else standalone
if "standalone_query" not in chat_response['content'][-1]['input']:
llm_client.add_message(role=WHISPER_ASSISTANT_ROLE, content=chat_response['content'])
llm_client.add_message(role=WHISPER_USER_ROLE, content=[{
"type": "tool_result",
"tool_use_id": chat_response[-1]['id'],
"content": 'Please generate confirmation field',
"is_error": True
}])
chat_response = llm_client.process(tools=[confirmation_tool],
force_tool=True,
tool_name="confirmation_tool")
confirmation = chat_response['content'][-1]['input'].get("Confirmation") if chat_response['content'][-1][
'input'].get("Confirmation") else confirmation
standalone = chat_response['content'][-1]['input'].get('standalone_query') if chat_response['content'][-1][
'input'].get('standalone_query') else standalone
logger.info(confirmation)
if confirmation:
return True, standalone
return False, ''
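# Usage sketch (assumption): callers check whether the user confirmed the summarized payload before
# dispatching the action, e.g.:
#
#   confirmed, standalone_query = await confirmation_bot(chat_id)
#   if confirmed:
#       ...  # proceed with execute_stage_1 using standalone_query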
def similarity_search_api_desc(query: str, k=1):
from app.main import app
retrieved_docs = app.state.vector_store_index.similarity_search(query, k=k)
method = [retrieved_docs[i].page_content for i in range(k)]
metadata = [retrieved_docs[i].metadata for i in range(k)]
return [method[0], metadata[0]]
# This is where action begins
async def execute_stage_1(initial_query, chat_id, user_dict, action_id, cloud_type=None, cloud_id=None):
"""
Executes the first stage of an action task based on the user's initial query.
This function is called when a bot finalizes user intent, such as "create a VPC backup" or "delete a COS instance",
which was initially queried from a vector database.
Parameters:
- initial_query (str): The initial query or command from the user.
    - chat_id (str): The ID of the chat session.
    - user_dict (dict): The authenticated user's details, including the bearer token.
    - action_id (str): The ID of the action to be executed.
    - cloud_type (str, optional) / cloud_id (str, optional): Optional cloud selection for the action.
Steps:
    1. Retrieve the action record and its metadata for the given action ID from the database.
2. Parse the metadata from the conversation history to extract the searched intent and message history.
3. Construct a JSON object representing the chat without including messages.
4. Record the start time for execution timing purposes.
5. Initialize the `ActionPhase` bot with the searched intent and message history.
6. Execute the intent using `intent_execution_bot.start` with the initial query.
7. Parse the bot's response to determine if the task has been finished.
8. Update the metadata based on the task's completion status. If finished, reset the stage, history, and searched intent.
9. Save the updated metadata back to the database.
10. Record the end time and log the total execution duration.
11. Create a new message request object with the bot's response and add it to the chat.
12. Yield the bot's response and chat information as events for the client.
Yields:
- A message event with the bot's response.
- A chat_info event with the updated chat JSON.
- A close event indicating the end of the process.
"""
logger.info("hereeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee")
from app.models import Message, Action, Chat, ActivityTracking
try:
async with db_deps.get_db_session_async_context() as db_session:
result = await db_session.scalars(
select(Action).filter(Action.id == action_id).options(selectinload(Action.messages)))
action = result.unique().one_or_none()
if not action:
raise ValueError("Action not found")
logger.info(action)
action_json = [message.to_reference_json() for message in action.messages[:-1]]
logger.info(action_json)
metadata_dict = json.loads(action.json_metadata)
if not metadata_dict:
raise ValueError("Metadata is empty or invalid")
logger.info(metadata_dict)
logger.info(metadata_dict.get('history'))
searched_intent = metadata_dict["searched_intent"]
if not searched_intent:
raise ValueError("Searched intent not found in metadata")
yield {"event": "action", "data": json.dumps(action.to_reference_json())}
logger.info(user_dict)
complex_bot_action = isinstance(searched_intent[-1]['method']['tool'], list)
if complex_bot_action:
intent_execution_bot = ComplexActionPhaseClaude(intent=searched_intent,
chat_history=metadata_dict.get('history'),
user_id=user_dict['id'],
bearer=user_dict['bearer'],
metadata=metadata_dict,
cloud_id=cloud_id)
elif searched_intent[-1]['method']['tool']['name'] == 'post_migration_request':
intent_execution_bot = MigrationActionPhaseClaude(intent=searched_intent,
chat_history=metadata_dict.get('history'),
user_dict=user_dict, bearer=user_dict['bearer'],
metadata_dict=metadata_dict, action_id=action_id,
cloud_id=cloud_id)
else:
intent_execution_bot = ActionPhaseClaude(intent=searched_intent,
chat_history=metadata_dict.get('history'),
user_id=user_dict['id'],
bearer=user_dict['bearer'],
metadata_dict=metadata_dict,
cloud_id=cloud_id
)
start_time = datetime.now()
response_obj = await intent_execution_bot.start(initial_query, chat_id=chat_id, action_id=action_id)
if complex_bot_action:
metadata_dict.update(intent_execution_bot.get_metadata())
complete_response = ""
if isinstance(response_obj, AsyncGeneratorType):
async for chunk in response_obj:
try:
data = "\n" if not chunk else chunk
yield {"event": "message", "data": data}
complete_response += data
except Exception as e:
logger.error(f"Error processing chunk: {str(e)}")
continue
else:
# If response is a string
response_obj1 = response_obj.replace('```','')
logger.info('printing type of response_obj')
logger.info(type(response_obj1))
marker = "Output Formatted Result:"
if marker in response_obj1:
response_obj1 = response_obj1.split(marker, 1)[-1].strip()
yield {"event": "message", "data": response_obj1}
complete_response = response_obj1
end_time = datetime.now()
logger.info(f"Total Seconds=>{(end_time - start_time).total_seconds()}")
#TODO: Handle the formatting incase of error
# intent_execution_bot.base_llm.messages[-1]["content"]= complete_response
logger.info("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
metadata_dict['history'] = intent_execution_bot.base_llm.messages
logger.info(metadata_dict['history'])
async with db_deps.get_db_session_async_context() as db_client:
action.json_metadata = json.dumps(metadata_dict)
await db_client.commit()
action_message = Message(msg_type=Message.TYPE_ASSISTANT,
content=complete_response.replace('Response:', ''),
msg_category=Message.TYPE_ACTION)
            action_message.action = action
            db_client.add(action_message)
await db_client.commit()
except ValueError as e:
logger.error(f"An error occurred in execute_stage_1: {str(e)}")
yield {"event": "error", "data": json.dumps({"detail": str(e)})}
finally:
async with db_deps.get_db_session_async_context() as db_client:
activities = (await db_client.scalars(select(ActivityTracking).filter(
ActivityTracking.action_id == action_id))).all()
if activities:
for activity in activities:
if activity.is_polled == False:
yield {"event": "task", "data": json.dumps(await activity.to_event_json(db_client))}
activity.is_polled = True
yield {"event": "action", "data": json.dumps(action.to_reference_json())}
async with db_deps.get_db_session_async_context() as db_client:
chat = (await db_client.scalars(select(Chat).filter(Chat.id == chat_id))).one_or_none()
if chat:
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
else:
logger.warning(f"Chat with ID {chat_id} not found.")
yield {"event": "close"}
async def get_information_from_db(user_id, question, chat_id, cloud_id=None,
cloud_type=None):
from app.whisper.utils.information_retrieval_engine.base import RetrievalPhaseClaude
from app.models import Message
error = None
import traceback
try:
async with db_deps.get_db_session_async_context() as db_client:
messages_obj = (await db_client.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
if not messages_obj:
raise ValueError("No messages found for the given chat_id")
chat_json = [message.to_reference_json() for message in messages_obj]
chat = messages_obj[0].chat
while len(chat_json) >= 6 and chat_json[-6]['type'] == 'Assistant':
chat_json.pop(0)
chat_json_last_5 = chat_json[-7:-1] # limit chat history to last 5 messages
retrieval_client = RetrievalPhaseClaude(chat_history=chat_json_last_5, user_id=user_id['id'],
llm_chat_history=chat.json_metadata, bearer=user_id['bearer'],
chat_id=chat_id, cloud_id=cloud_id, cloud_type=cloud_type
)
response_obj = await retrieval_client.start(question)
complete_response = ""
logger.info('printing response obj')
logger.info(response_obj)
if isinstance(response_obj, types.AsyncGeneratorType):
logger.info('in the async generator')
async for chunk in response_obj:
try:
# data = "" if not chunk else chunk
# data.replace('\n', '<br>')
logger.info(chunk)
yield {"event": "message", "data": chunk}
complete_response += chunk
except Exception as e:
logger.error(f"Error processing chunk: {str(e)}")
continue
else:
# If response is a string
response_obj1 = response_obj.replace('```','')
logger.info('printing type of response_obj')
logger.info(type(response_obj1))
marker = "Output Formatted Result:"
if marker in response_obj1:
response_obj1 = response_obj1.split(marker, 1)[-1].strip()
yield {"event": "message", "data": response_obj1}
complete_response = response_obj1
logger.info("*******************************FORMATTED CONTENT**************")
logger.info(complete_response)
logger.info("*******************************FORMATTED CONTENT**************")
async with db_deps.get_db_session_async_context() as db_client:
chat_message = Message(
msg_type=Message.TYPE_ASSISTANT,
content=complete_response,
msg_category=Message.TYPE_QNA
)
chat_message.chat = chat
chat.json_metadata = retrieval_client.base_llm.messages
logger.info(retrieval_client.base_llm.messages)
db_client.add(chat_message)
await db_client.commit()
except ValueError as e:
logger.error(f"An error occurred in get_information_from_db: {str(e)}")
error = {"event": "error", "data": json.dumps({"detail": str(e)})}
except Exception as e:
logger.error(f"An error occurred while retrieving information: {traceback.format_exc()} ->{str(e)}")
error = {"event": "error", "data": json.dumps({"detail": "Internal server error"})}
finally:
if error:
yield error
yield {"event": "chat_info", "data": json.dumps(chat.to_reference_json())}
yield {"event": "close"}
def format_response(response: str):
if response is not None:
return response.replace('|', '| ')
else:
return ''
async def check_cloud_account_status(chat_id, api_endpoint, tool,
authorization: HTTPAuthorizationCredentials):
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {authorization.credentials.strip('')}"
}
base_url = f'{settings.web.AUTH_LINK}{api_endpoint}'
try:
async with httpx.AsyncClient() as client:
response = await client.get(
f'{base_url}',
headers=headers,
timeout=10
)
response.raise_for_status()
logger.info(f"Response Status Code: {response.status_code}")
payload = response.json() if response.content else None
if tool == "ClassicMigration":
cloud_message = IBM_CLASSIC_CLOUD_ACCOUNT_MESSAGE
elif tool == "Action":
cloud_message = IBM_CLOUD_ACCOUNT_MESSAGE
elif tool == "ScheduleCall":
cloud_message = GENERAL_CLOUD_ACCOUNT_MESSAGE
elif tool == "InformationRetrievalClassic":
cloud_message = IBM_CLASSIC_CLOUD_ACCOUNT_MESSAGE
elif tool == "InformationRetrievalAction":
cloud_message = IBM_CLOUD_ACCOUNT_MESSAGE
else:
cloud_message = "Please check your cloud accounts to ensure they are properly configured and valid."
return cloud_message
if not payload:
if tool in ["InformationRetrievalClassic", "InformationRetrievalAction"]:
return cloud_message
else:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=cloud_message.format(
vpcplus_url=f"{base_url}",
cloud_whisper_url=f"{settings.web.BACKEND_URI}{api_endpoint}"
),
chat_id=chat_id
))
cloud_accounts = payload.get('items', [])
logger.info(f"Retrieved cloud accounts: {cloud_accounts}")
is_cloud_account_valid = any(account.get('status') == 'VALID' for account in cloud_accounts)
if not is_cloud_account_valid:
logger.info("No valid cloud accounts found.")
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg="The cloud account status is currently invalid. Please check your cloud account and ensure it is properly configured and valid.",
chat_id=chat_id,
))
except httpx.RequestError as e:
logger.error(f"Error fetching cloud accounts: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error fetching cloud accounts: {str(e)}")
return None
|
CloudWhisperCustomBot | app/web/common/db_deps.py | import asyncio
from typing import AsyncGenerator
from contextlib import asynccontextmanager
from loguru import logger
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from app.core.config import settings
AsyncSessionLocal = sessionmaker(
bind=create_async_engine(
settings.db.SQLALCHEMY_DATABASE_URI,
pool_recycle=settings.db.SQLALCHEMY_POOL_RECYCLE,
pool_timeout=settings.db.SQLALCHEMY_POOL_TIMEOUT,
pool_size=settings.db.SQLALCHEMY_POOL_SIZE,
max_overflow=settings.db.SQLALCHEMY_MAX_OVERFLOW,
),
class_=AsyncSession,
expire_on_commit=False,
autocommit=False,
autoflush=False
)
async def get_db_session_async() -> AsyncGenerator[AsyncSession, None]:
try:
async with AsyncSessionLocal() as session:
logger.success("Success: connection to the database")
yield session
except Exception:
await session.rollback()
raise
finally:
logger.info("Closing connection to the database")
await session.close()
@asynccontextmanager
async def get_db_session_async_context() -> AsyncGenerator[AsyncSession, None]:
try:
async with AsyncSessionLocal() as session:
logger.success("Success: connection to the database")
yield session
except Exception:
await session.rollback()
raise
finally:
logger.info("Closing connection to the database")
await session.close()
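# Usage sketch: the rest of the codebase consumes this helper as an async context manager, e.g.
# (model import shown for illustration only):
#
#   async with get_db_session_async_context() as session:
#       result = await session.execute(select(models.Profile).limit(1))
#       profile = result.scalar_one_or_none()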
def get_sync_session():
loop = asyncio.get_event_loop()
async_session = AsyncSessionLocal()
return loop.run_until_complete(async_session.__aenter__())
|
CloudWhisperCustomBot | app/web/common/deps.py | import httpx
from fastapi import Depends, Header, HTTPException, WebSocketException
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from httpx import Response
from loguru import logger
from app.api_discovery.utils import update_profile_with_vpcplus_api_key
from app.core.config import settings
from app.web.common import db_deps
from sqlalchemy import select
from app import models
security = HTTPBearer()
async def authenticate_user(
project_id: str = Header(None, convert_underscores=False),
authorization: HTTPAuthorizationCredentials = Depends(security)
) -> Response | dict:
from app.web.profiles.schemas import OnboardingStatus
if authorization.scheme != "Bearer":
raise HTTPException(status_code=401, detail="Invalid authentication scheme")
headers = {'Authorization': f"Bearer {authorization.credentials}"}
if project_id:
headers["project_id"] = project_id
async with httpx.AsyncClient(timeout=30.0) as http_client:
resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/verify", headers=headers)
if not resp or resp.status_code != 200:
raise HTTPException(status_code=401)
if project_id:
data = resp.json()
data["project_id"] = project_id
return data
user_json = resp.json()
async with db_deps.get_db_session_async_context() as session:
result = await session.execute(select(models.Profile).filter_by(user_id=user_json['id']).limit(1))
profile = result.scalar_one_or_none()
logger.info(f"user: {profile}")
if not profile:
profile = models.Profile(
user_id=user_json["id"], name=user_json["name"], project_id=user_json.get("project_id", ""),
is_admin=bool(user_json.get("is_admin") or False), email=user_json["email"], onboarding=OnboardingStatus.app_tour
)
logger.info(profile.to_json())
session.add(profile)
await session.commit()
result = await session.execute(select(models.Profile).filter_by(user_id=user_json['id']).limit(1))
profile = result.scalar_one_or_none()
logger.info(f"{dir(profile)}")
user_json["uuid"] = profile.id
user_json["id"] = profile.user_id
user_json["onboarding"] = profile.onboarding
# If User has already VPC+ API Key created, fetch it from Auth Service and store it in order to run discovery
headers = {"Authorization": f"Bearer {authorization.credentials}"}
with httpx.Client() as http_client:
resp = http_client.get(f"{settings.web.AUTH_LINK}/v1/users/api_key", headers=headers)
if resp.status_code == 200:
api_key_json = resp.json()
api_key = api_key_json.get("key")
api_key_name = api_key_json.get("name")
api_key_expiry = api_key_json.get("expires_at")
async with db_deps.get_db_session_async_context() as session:
await update_profile_with_vpcplus_api_key(
profile_id=user_json["id"],
api_key=api_key, api_key_name=api_key_name, api_key_expiry=api_key_expiry if api_key_expiry else None
)
user_json["bearer"] = f"Bearer {authorization.credentials}"
user_json["appearance"] = profile.appearance if profile.appearance else None
return user_json
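# Note (descriptive): authenticate_user returns the verified user's JSON enriched with the local
# profile's 'uuid', 'id', 'onboarding', 'bearer', and 'appearance' fields, which downstream handlers
# rely on (e.g. user_dict['bearer'] in the action engines).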
async def first_message_handler(websocket) -> Response | dict:
from app.web.profiles.schemas import OnboardingStatus
token = await websocket.receive_text()
headers = {'Authorization': f"Bearer {token}"}
async with httpx.AsyncClient() as http_client:
resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/verify", headers=headers)
# if user is None:
if not resp or resp.status_code != 200:
raise WebSocketException(code=1008, reason="Policy Violation, User not found") #returns the control and closes the connection
user_json = resp.json()
async with db_deps.get_db_session_async_context() as session:
result = await session.execute(select(models.Profile).filter_by(user_id=user_json['id']).limit(1))
profile = result.scalar_one_or_none()
logger.info(f"user: {profile}")
if not profile:
profile = models.Profile(
user_id=user_json["id"], name=user_json["name"], project_id=user_json.get("project_id", ""),
is_admin=bool(user_json["is_admin"]), email=user_json["email"], onboarding=OnboardingStatus.app_tour
)
logger.info(profile.to_json())
session.add(profile)
await session.commit()
result = await session.execute(select(models.Profile).filter_by(user_id=user_json['id']).limit(1))
profile = result.scalar_one_or_none()
logger.info(f"{dir(profile)}")
user_json["uuid"] = profile.id
user_json["id"] = profile.user_id
user_json["onboarding"] = profile.onboarding
# If User has already VPC+ API Key created, fetch it from Auth Service and store it in order to run discovery
with httpx.Client() as http_client:
resp = http_client.get(f"{settings.web.AUTH_LINK}/v1/users/api_key", headers=headers)
if resp.status_code == 200:
api_key_json = resp.json()
api_key = api_key_json.get("key")
api_key_name = api_key_json.get("name")
api_key_expiry = api_key_json.get("expires_at")
async with db_deps.get_db_session_async_context() as session:
await update_profile_with_vpcplus_api_key(
profile_id=user_json["id"],
api_key=api_key, api_key_name=api_key_name, api_key_expiry=api_key_expiry if api_key_expiry else None
)
user_json["bearer"] = f"Bearer {token}"
return user_json
|
CloudWhisperCustomBot | app/web/common/api_path_to_fields.json | {
"Create IBM VPC backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/vpcs": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM Clouds": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM VPC Networks": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/vpcs": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM Regions": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
}
},
"List IBM Draas backups": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/draas_blueprints": {
"GET": {
"fields": [
"id",
"name",
"backups"
],
"nested_fields": {
"backups": [
"id",
"name"
]
}
}
}
},
"Create IBM COS bucket backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"GET": {
"fields": [
"id",
"name",
"cloud_object_storage",
"cos_bucket_versioning",
"regions"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM COS buckets": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"list IBM COS bucket instances": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM Kubernetes Clusters": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Create IBM IKS Cluster backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name",
"master_kube_version"
]
}
},
"v1/ibm/kubernetes_clusters/temp": {
"GET": {
"fields": [
"id",
"workloads"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"GET": {
"fields": [
"id",
"name",
"cloud_object_storage",
"cos_bucket_versioning",
"regions"
]
}
},
"v1/ibm/cloud_object_storages/keys": {
"GET": {
"fields": [
"id",
"name",
"is_hmac"
]
}
}
},
"List IBM COS bucket credential keys": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/cloud_object_storages/keys": {
"GET": {
"fields": [
"id",
"name",
"is_hmac"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List a single IBM Kubernetes Cluster": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name",
"master_kube_version"
]
}
},
"v1/ibm/kubernetes_clusters/temp": {
"GET": {
"fields": [
"id",
"workloads"
]
}
}
},
"List all IBM VSI Instances": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Create IBM VSI backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List all IBM Backup Policies": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List a single IBM VSI": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/instances/temp": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Create scheduled IBM VSI backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Restore IBM IKS Cluster backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/draas_blueprints": {
"GET": {
"fields": [
"id",
"name",
"backups",
"resource_metadata"
],
"nested_fields": {
"backups": [
"id",
"name"
]
}
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name",
"master_kube_version"
]
}
}
},
"Restore IBM IKS Cluster backup in existing IBM VPC": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/draas_blueprints": {
"GET": {
"fields": [
"id",
"name",
"backups"
],
"nested_fields": {
"backups": [
"id",
"name"
],
"resource_metadata": [
"cluster_id",
"blueprint_name"
]
}
}
},
"v1/ibm/vpcs": {
"GET": {
"fields": [
"id",
"name"
],
"nested_fields": {
"associated_resources": [
"subnets"
]
}
}
},
"v1/ibm/resource_groups": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/subnets": {
"GET": {
"fields": [
"id",
"name",
"zone"
]
}
}
}
} |
CloudWhisperCustomBot | app/web/common/cloud_setup_instruction_messages.py | API_KEY_MESSAGE = """Cloud Whisper requires a VPC+ API key to discover data and perform actions. Please follow these steps to create your API key:\n
1. Create your VPC+ API Key: \n \t \n
a. Click on User name in the bottom left corner and select Settings \n \t \n
b. Navigate to the "API Key" section \n \t \n
c. Create a new API key:\n \t \n
- Provide a name for your key\n \t \n
- Add a description\n \t \n
- Set an expiration time (optional)\n\n
Once completed, Cloud Whisper will be able to access the necessary VPC+ data for its operations.\n\n
If you encounter any issues during this process, please don't hesitate to contact our support team.
"""
IBM_CLASSIC_CLOUD_ACCOUNT_MESSAGE = """Cloud Whisper requires a connected IBM Classic Cloud account to discover data and perform actions. Please follow these steps to add your IBM Classic Cloud account:\n
1. Add your IBM Classic Cloud Account: \n \t \n
a. Select IBM Classic Cloud Accounts on the left bottom corner of the interface.\n \t \n
b. Click on Add Account.\n \t \n
c. Fill in the Name, Username and API key.\n \t \n
d. Click Add to create and save your IBM Classic Cloud account.\n\n
Once completed, Cloud Whisper will be able to access the necessary data for its operations.\n\n
If you encounter any issues during this process, please contact our support team for assistance.
"""
IBM_CLOUD_ACCOUNT_MESSAGE = """Cloud Whisper requires a connected IBM Cloud account to discover data and perform actions. Please follow these steps to add your IBM Cloud account:\n
1. Add your IBM Cloud Account:\n \t \n
a. Select IBM Cloud Accounts on the left bottom corner of the interface.\n \t \n
b. Click on Add Account.\n \t \n
c. Fill in the Name and API key.\n \t \n
d. Click Add to create and save your cloud account.\n\n
Once completed, Cloud Whisper will be able to access the necessary data for its operations.\n\n
If you encounter any issues during this process, please contact our support team for assistance.
"""
GENERAL_CLOUD_ACCOUNT_MESSAGE = """Cloud Whisper requires a connected IBM Cloud and IBM Classic Cloud account to discover data and perform actions. Please follow these steps to add your account:\n
1. Add your IBM Cloud and IBM Classic Cloud Account: \n \t \n
a. Select IBM Cloud Accounts and IBM Classic Cloud Accounts from the bottom left corner of the interface.\n \t \n
b. Click on Add Account.\n \t \n
c. For IBM Cloud accounts, fill in the Name and API key. For IBM Classic Cloud accounts, fill in the Name, Username, and API key.\n \t \n
d. Click Add to create and save your account.\n\n
Once completed, Cloud Whisper will be able to access the necessary data for its operations.\n\n
If you encounter any issues during this process, please contact our support team for assistance.
"""
|
CloudWhisperCustomBot | app/web/activity_tracking/neo4j_query.py | get_vpc_backups_query = """
MATCH (v:VPC)
OPTIONAL MATCH (b:VPCBackup {name: v.name})
WITH v, b
WHERE b IS NULL AND v.cloud_id = '{cloud_id}'
RETURN v.cloud_id, v.name
"""
get_iks_backups_query = """
MATCH (v:KubernetesCluster)
OPTIONAL MATCH (b:IKSBackupDetails {name: v.name})
WITH v, b
WHERE b IS NULL AND v.cloud_id = '{cloud_id}'
RETURN v.cloud_id, v.name
"""
get_cos_backups_query = """
MATCH (v:COSBucket)
OPTIONAL MATCH (b:COSBucketBackupDetails {name: v.name})
WITH v, b
WHERE b IS NULL AND v.cloud_id = '{cloud_id}'
RETURN v.cloud_id, v.name
"""
get_vsi_backups_query = """
MATCH (v:VirtualServerInstance)
OPTIONAL MATCH (b:VirtualServerInstanceBackup {name: v.name})
WITH v, b
WHERE b IS NULL AND v.cloud_id = '{cloud_id}'
RETURN v.cloud_id, v.name
"""
|
CloudWhisperCustomBot | app/web/activity_tracking/__init__.py | from .api import activity_tracking_n_recommendations
__all__ = ["activity_tracking_n_recommendations"]
|
CloudWhisperCustomBot | app/web/activity_tracking/api.py | from math import ceil
from typing import List, Optional
from typing import Annotated
from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy.ext.asyncio import AsyncSession
from loguru import logger
from sqlalchemy import select
from pydantic import conint
from sqlalchemy.orm import undefer
from sqlalchemy import desc, func
from app import models
from app.core.config import settings
from app.web.activity_tracking.neo4j_query import get_vpc_backups_query, get_iks_backups_query, get_cos_backups_query, \
get_vsi_backups_query
from app.web.common import deps, db_deps
from app.whisper.utils.neo4j.client import Neo4j
activity_tracking_n_recommendations = APIRouter()
@activity_tracking_n_recommendations.get("", name="Get Activities (Backup and Restore) Performed & Recommendations")
async def get_activity_n_recommendations(
cloud_id: str,
user=Depends(deps.authenticate_user),
recommendation: bool = None,
status: Annotated[list[str] | None, Query()] = None,
cloud: Annotated[list[str] | None, Query()] = None,
start: conint(ge=1) = 1,
limit: conint(ge=1, le=settings.pagination_config.MAX_PAGE_LIMIT) = settings.pagination_config.DEFAULT_LIMIT
):
from app.main import app
response = []
# Only add recommendations if it's the first page
if start == 1 and recommendation:
neo4j_client = Neo4j(db_session=app.state.neo4j_session, user_id=user["id"])
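# Each recommendation below runs a Neo4j query that finds resources of the given type with no matching backup node for this cloud.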
# Add VPC backup recommendation
vpc_query = get_vpc_backups_query.replace("{cloud_id}", cloud_id)
vpc_result = neo4j_client.query_database(vpc_query)
if vpc_result:
vpc_backup_recommendation_dict = {
'type': 'recommendation',
'cloud': 'ibm',
'status': 'info',
'title': 'You have multiple VPCs which are not backed up, would you like to back them up?',
'prompt': 'How many VPCs do I have which are not backed up? I need to back them up',
}
response.append(vpc_backup_recommendation_dict)
# Add IKS backup recommendation
iks_query = get_iks_backups_query.replace("{cloud_id}", cloud_id)
iks_result = neo4j_client.query_database(iks_query)
if iks_result:
iks_backup_recommendation = {
'type': 'recommendation',
'cloud': 'ibm',
'status': 'info',
'title': 'You have multiple IKS clusters which are not backed up, would you like to back them up?',
'prompt': 'How many IKS clusters do I have which are not backed up? I need to back them up',
}
response.append(iks_backup_recommendation)
# Add COS Buckets backup recommendation
cos_buckets_query = get_cos_backups_query.replace("{cloud_id}", cloud_id)
cos_result = neo4j_client.query_database(cos_buckets_query)
if cos_result:
cos_buckets_backup_recommendation = {
'type': 'recommendation',
'cloud': 'ibm',
'status': 'info',
'title': 'You have multiple COS Buckets which are not backed up, would you like to back them up?',
'prompt': 'How many COS Buckets do I have which are not backed up? Can you show them?',
}
response.append(cos_buckets_backup_recommendation)
# Add VSI backup recommendation
vsi_query = get_vsi_backups_query.replace("{cloud_id}", cloud_id)
vsi_result = neo4j_client.query_database(vsi_query)
if vsi_result:
vsi_backup_recommendation = {
'type': 'recommendation',
'cloud': 'ibm',
'status': 'info',
'title': 'You have multiple Virtual Server Instances (VSIs) which are not backed up, would you like to back them up?',
'prompt': 'How many Virtual Server Instances (VSIs) do I have which are not backed up? I need to back them up',
}
response.append(vsi_backup_recommendation)
async with db_deps.get_db_session_async_context() as db_session:
# Pagination logic for activities
total = await db_session.scalar(select(func.count(models.ActivityTracking.id)).filter_by(user_id=user["id"]))
pages = ceil(total / limit)
if start > pages:
start = 1
offset = (start - 1) * limit
filters = {"user_id": user["id"]}
# TODO: Add the cloud filter as well once other clouds (e.g., SoftLayer, AWS) are supported
if status:
activities = (await db_session.scalars(
select(models.ActivityTracking).filter(models.ActivityTracking.status.in_(status)).filter_by(**filters)
.options(undefer(models.ActivityTracking.fe_request_data))
.order_by(desc(models.ActivityTracking.created_at)).offset(offset).limit(limit)
)).all()
else:
activities = (await db_session.scalars(
select(models.ActivityTracking).filter_by(**filters)
.options(undefer(models.ActivityTracking.fe_request_data))
.order_by(desc(models.ActivityTracking.created_at)).offset(offset).limit(limit)
)).all()
if activities:
for activity in activities:
action = (
await db_session.scalars(
select(models.Action).filter(models.Action.id == activity.action_id))).one_or_none()
activity_dict = {
'type': 'action',
'cloud': 'ibm',
'status': activity.status,
'title': f"{action.name} of {activity.resource_name}",
'json': await activity.to_json(db_session)
}
response.append(activity_dict)
return {
"items": response,
"previous": start - 1 if start > 1 else None,
"next": start + 1 if start < pages else None,
"pages": pages,
"total": total
}
@activity_tracking_n_recommendations.get("/{workflow_id}", name="Get a Workflow by ID")
async def get_activity(
workflow_id: str,
user=Depends(deps.authenticate_user),
):
async with db_deps.get_db_session_async_context() as db_session:
activity = (await db_session.scalars(select(models.ActivityTracking).filter(
models.ActivityTracking.workflow_id == workflow_id).options(undefer(models.ActivityTracking.fe_request_data)))
).one_or_none()
if not activity:
logger.error(f"No activity found with ID: {workflow_id}")
raise HTTPException(status_code=404, detail=f"No activity found with ID: {workflow_id}")
return await activity.to_json(db_session)
|
CloudWhisperCustomBot | app/web/chats/schemas.py | import datetime
import typing as t
import uuid
from enum import Enum
from pydantic import BaseModel, Field
class MessageTypeEnum(str, Enum):
Human = 'Human'
Assistant = 'Assistant'
class ChatTypeEnum(str, Enum):
QnA = 'QnA'
Action = 'Action'
class MessageType(BaseModel):
type: MessageTypeEnum
class MessageRequest(BaseModel):
text: str
type: t.Optional[str] = "Human"
class MessageResponse(BaseModel):
id: uuid.UUID
sent_at: datetime.datetime
text: str
type: MessageType
class ChatRequest(BaseModel):
question: str
chat_id: t.Optional[str] = Field(default=None, max_length=32, min_length=32, regex="^[0-9a-fA-F]+$")
action_id: t.Optional[str] = Field(default=None, max_length=32, min_length=32, regex="^[0-9a-fA-F]+$")
regenerate: t.Optional[bool] = Field(default=False)
cloud_account_id: t.Optional[str] = Field(default=None)
cloud_type: t.Optional[str] = Field(default=None)
class GenerateTitleRequest(BaseModel):
message_id: uuid.UUID
class UpdateChatRequest(BaseModel):
is_visible: t.Optional[bool]
title: t.Optional[str]
metadata: t.Optional[dict]
class StreamConversationRequest(BaseModel):
message_id: uuid.UUID
|
CloudWhisperCustomBot | app/web/chats/__init__.py | from .api import whisper_chats
__all__ = ["whisper_chats"]
|
CloudWhisperCustomBot | app/web/chats/api.py | import json
import time
from http import HTTPStatus
from math import ceil
from fastapi import APIRouter, Depends, HTTPException
from fastapi.security import HTTPAuthorizationCredentials
from loguru import logger
from pydantic import conint
from sqlalchemy import func
from sqlalchemy import select, desc, update, asc
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload, selectinload
from sse_starlette import EventSourceResponse
from app.core.config import settings
from app.web.chats.schemas import ChatRequest, UpdateChatRequest
from app.web.common import db_deps, deps
from app.web.common.chats_websockets_utils import process_routing, execute_qna, narrow_down_intent, execute_stage_1, \
get_information_from_db, confirmation_bot, similarity_search_api_desc, check_cloud_account_status, \
narrow_down_migration_intent
from app.web.common.cloud_setup_instruction_messages import API_KEY_MESSAGE
from app.web.common.deps import security
from app.web.common.utils import api_key_msg_event_generator, user_msg_event_generator
whisper_chats = APIRouter()
@whisper_chats.post("")
async def create_chat(
conv_req: ChatRequest,
user=Depends(deps.authenticate_user),
authorization: HTTPAuthorizationCredentials = Depends(security),
):
from app.models.chat import Chat, Message, Action
from app.models.profile import Profile
start = time.time()
chat_id, action_id, regenerate, confirmation_stage, cloud_account_id, cloud_type = (conv_req.chat_id,
conv_req.action_id,
conv_req.regenerate,
False,
conv_req.cloud_account_id,
conv_req.cloud_type)
cloud_account_id = cloud_account_id if cloud_account_id != 'all' else None
logger.info(f"Provided question: {conv_req}")
async with db_deps.get_db_session_async_context() as db_session:
if chat_id:
logger.info(f"********************action id**********************: {action_id}")
chat = ((await db_session.scalars(select(Chat).filter_by(id=chat_id).options(selectinload(Chat.messages)))).
one_or_none())
logger.info(f"chat: {chat}")
if regenerate:
query = select(Message).options(
selectinload(Message.action) if action_id else selectinload(Message.chat)
).order_by(desc(Message.sent_at))
confirmation_stage = chat.confirmation_stage
if action_id:
query = query.filter_by(action_id=action_id)
else:
query = query.filter_by(chat_id=chat_id)
chat_message = (await db_session.scalars(query)).first()
if chat_message and chat_message.type == Message.TYPE_HUMAN:
conv_req.question = chat_message.content
else:
if action_id:
action = (await db_session.scalars(select(Action).filter_by(id=action_id))).one_or_none()
logger.info(f"Action {action.id}")
if not action:
raise HTTPException(status_code=404, detail="Action not found")
chat_message = Message(msg_type=Message.TYPE_HUMAN, content=conv_req.question,
msg_category=Message.TYPE_ACTION)
logger.info(chat_message.id)
chat_message.action = action
db_session.add(chat_message)
await db_session.commit()
else:
chat_message = Message(msg_type=Message.TYPE_HUMAN, content=conv_req.question,
msg_category=Message.TYPE_ACTION)
chat_message.chat = chat
confirmation_stage = chat.confirmation_stage
db_session.add(chat_message)
await db_session.commit()
else:
logger.info("Creating new chat")
profile = (await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
chat = Chat(title=conv_req.question[:80], chat_type=Chat.TYPE_QNA)
chat_message = Message(msg_type=Message.TYPE_HUMAN, content=conv_req.question, msg_category=Message.TYPE_QNA)
chat_id, chat.profile, chat_message.chat = chat.id, profile, chat
db_session.add(chat)
db_session.add(chat_message)
await db_session.commit()
logger.info(confirmation_stage)
action_confirmed, standalone = '', ''
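# If the previous turn left this chat awaiting confirmation, run the confirmation bot on the user's reply to decide whether the pending action should proceed.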
if confirmation_stage:
async with db_deps.get_db_session_async_context() as db_session:
action_confirmed, standalone = await confirmation_bot(chat_id=chat_id)
logger.info(action_confirmed)
logger.info(standalone)
chat.confirmation_stage = False
await db_session.commit()
if action_confirmed:
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
api_key, api_key_status = profile.api_key, profile.api_key_status
if not api_key or api_key_status == settings.api_key_status.STATUS_INVALID:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"),
chat_id=chat_id))
api_endpoint = "/v1/ibm/clouds"
tool = "Action"
cloud_account_check_response = await check_cloud_account_status(chat_id, api_endpoint, tool,
authorization=authorization)
if cloud_account_check_response:
return cloud_account_check_response
async with db_deps.get_db_session_async_context() as db_session:
messages_obj = (await db_session.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
metadata = json.loads(messages_obj[-3].json_metadata)
metadata['initial_query'] = standalone
logger.info(metadata["searched_intent"][0])
action_obj = Action(name=metadata["searched_intent"][0], metadata=json.dumps(metadata))
action_id = action_obj.id
dummy_message_content = "This action is performed by another bot. You can start with a fresh conversation or continue with a new context."
dummy_message = Message(
msg_type=Message.TYPE_ASSISTANT,
content=dummy_message_content,
msg_category=Message.TYPE_ACTION,
is_visible=False,
)
dummy_message.chat = chat
db_session.add(dummy_message)
messages_obj[-1].action_id = action_obj.id
db_session.add(action_obj)
chat_message.msg_category = Message.TYPE_ACTION
await db_session.commit()
EventSourceResponse({"event": "action", "data": json.dumps(action_obj.to_reference_json())})
return EventSourceResponse(execute_stage_1(initial_query=standalone, user_dict=user,
chat_id=chat_id, action_id=action_id, cloud_type=cloud_type,
cloud_id=cloud_account_id))
else:
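# No action was confirmed, so route the message to the appropriate tool (QnA, Action, InformationRetrieval, or ClassicMigration).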
standalone_query, tool = await process_routing(chat_id=chat_id)
end = time.time()
logger.info(
f'tool from process_routing is {tool}\nTime it took {end - start}s\nUser query: {standalone_query}')
if tool == 'QnA_or_Schedule_a_call':
return EventSourceResponse(execute_qna(question=conv_req.question,
chat_id=chat_id,
user_name=user["name"]),
media_type="text/event-stream")
elif tool == 'Action':
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
api_key, api_key_status = profile.api_key, profile.api_key_status
if not api_key or api_key_status == settings.api_key_status.STATUS_INVALID:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"),
chat_id=chat_id))
api_endpoint = "/v1/ibm/clouds"
tool = "Action"
cloud_account_check_response = await check_cloud_account_status(chat_id, api_endpoint, tool,
authorization=authorization)
if cloud_account_check_response:
return cloud_account_check_response
return EventSourceResponse(
narrow_down_intent(chat_id=chat_id, user_dict=user,
standalone_query=standalone_query, action_id=action_id,
cloud_id=cloud_account_id),
media_type="text/event-stream")
elif tool == 'ClassicMigration':
# Identify whether the user has a simple request and wants to discuss it over a call, or wants to start the migration steps.
action, response_content, complete_response = await narrow_down_migration_intent(chat_id=chat_id,
db_client=db_session,
user_dict=user,
standalone_query=standalone_query)
if action:
# TODO: Handle this efficiently, make it robust
if action.startswith(('START', 'start')):
async with db_deps.get_db_session_async_context() as db_session:
profile = (
await db_session.scalars(
select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
api_key, api_key_status = profile.api_key, profile.api_key_status
if not api_key or api_key_status == settings.api_key_status.STATUS_INVALID:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"),
chat_id=conv_req.chat_id))
api_endpoint = "/v1/softlayer/accounts"
tool = "ClassicMigrationAction"
cloud_account_check_response = await check_cloud_account_status(chat_id,
api_endpoint, tool,
authorization=authorization)
if cloud_account_check_response:
return cloud_account_check_response
async with db_deps.get_db_session_async_context() as db_session:
messages_obj = (await db_session.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(
asc(Message.sent_at)))).all()
action_tool = similarity_search_api_desc("migrate_ibm_classic")
messages_obj[-1].json_metadata = json.dumps({
'searched_intent': action_tool,
'task_finished': False,
'stage': 1,
'history': []
})
metadata = json.loads(messages_obj[-3].json_metadata)
metadata['initial_query'] = standalone
logger.info(metadata["searched_intent"][0])
action_obj = Action(name=metadata["searched_intent"][0], metadata=json.dumps(metadata))
action_id = action_obj.id
dummy_message_content = "This action is performed by another bot. You can start with a fresh conversation or continue with a new context."
dummy_message = Message(
msg_type=Message.TYPE_ASSISTANT,
content=dummy_message_content,
msg_category=Message.TYPE_ACTION,
is_visible=False,
)
dummy_message.chat = chat
db_session.add(dummy_message)
messages_obj[-1].action_id = action_obj.id
db_session.add(action_obj)
chat_message.msg_category = Message.TYPE_ACTION
await db_session.commit()
EventSourceResponse({"event": "action", "data": json.dumps(action_obj.to_reference_json())})
return EventSourceResponse(
execute_stage_1(initial_query=standalone, user_dict=user,
chat_id=chat_id, action_id=action_id, cloud_id=cloud_account_id))
else:
return EventSourceResponse(execute_qna(question=conv_req.question,
chat_id=chat_id,
user_name=user["name"]),
media_type="text/event-stream")
else:
if response_content:
return EventSourceResponse(
user_msg_event_generator(response_for_user=response_content,
chat_id=chat_id
))
elif tool == 'InformationRetrieval':
logger.info(f"RETRIEVING DATA with cloud_id->{cloud_account_id} and cloud_type->{cloud_type}")
async with db_deps.get_db_session_async_context() as db_session:
profile = (
await db_session.scalars(
select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
api_key, api_key_status = profile.api_key, profile.api_key_status
if not api_key or api_key_status == settings.api_key_status.STATUS_INVALID:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(
vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"
),
chat_id=chat_id,
))
return EventSourceResponse(
get_information_from_db(
question=conv_req.question,
chat_id=chat_id,
user_id=user,
cloud_id=cloud_account_id,
cloud_type=cloud_type
),
media_type="text/event-stream"
)
if action_id: # if action id exists then it means that it's on stage one
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
api_key, api_key_status = profile.api_key, profile.api_key_status
if not api_key or api_key_status == settings.api_key_status.STATUS_INVALID:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"),
chat_id=conv_req.chat_id))
action = conv_req.question
logger.info(f"stage1 question: {action}")
async with db_deps.get_db_session_async_context() as db_session:
chat_message.msg_category = Message.TYPE_ACTION
await db_session.commit()
return EventSourceResponse(execute_stage_1(initial_query=action, user_dict=user,
chat_id=chat_id, action_id=action_id, cloud_id=cloud_account_id,
cloud_type=cloud_type))
else:
standalone_query, tool = await process_routing(chat_id=chat_id)
end = time.time()
logger.info(f'tool from process_routing is {tool}\nTime it took {end - start}s\nUser query: {standalone_query}')
if tool == 'QnA_or_Schedule_a_call':
return EventSourceResponse(execute_qna(question=conv_req.question,
chat_id=chat_id,
user_name=user["name"]),
media_type="text/event-stream")
elif tool == 'Action':
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
api_key, api_key_status = profile.api_key, profile.api_key_status
if not api_key or api_key_status == settings.api_key_status.STATUS_INVALID:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"),
chat_id=chat_id))
api_endpoint = "/v1/ibm/clouds"
tool = "Action"
cloud_account_check_response = await check_cloud_account_status(chat_id, api_endpoint, tool,
authorization=authorization)
if cloud_account_check_response:
return cloud_account_check_response
logger.info(f"@@@@@@@@: {chat_id}")
return EventSourceResponse(
narrow_down_intent(chat_id=chat_id, user_dict=user,
standalone_query=standalone_query, action_id=action_id, cloud_id=cloud_account_id),
media_type="text/event-stream")
elif tool == 'InformationRetrieval':
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
api_key, api_key_status = profile.api_key, profile.api_key_status
if not api_key or api_key_status == settings.api_key_status.STATUS_INVALID:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(
vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"
),
chat_id=chat_id
))
return EventSourceResponse(
get_information_from_db(
question=conv_req.question,
chat_id=chat_id,
user_id=user,
cloud_id=cloud_account_id,
cloud_type=cloud_type
),
media_type="text/event-stream"
)
elif tool == 'ClassicMigration':
action, response_content, complete_response = await narrow_down_migration_intent(chat_id=chat_id,
user_dict=user,
standalone_query=standalone_query)
if action:
# TODO: Handle this efficiently, make it robust
if action.startswith(('START', 'start')):
async with db_deps.get_db_session_async_context() as db_session:
profile = (await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
api_key, api_key_status = profile.api_key, profile.api_key_status
if not api_key or api_key_status == settings.api_key_status.STATUS_INVALID:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"),
chat_id=conv_req.chat_id))
api_endpoint = "/v1/softlayer/accounts"
tool = "ClassicMigration"
cloud_account_check_response = await check_cloud_account_status(chat_id, api_endpoint,
tool,
authorization=authorization)
if cloud_account_check_response:
return cloud_account_check_response
async with db_deps.get_db_session_async_context() as db_session:
messages_obj = (await db_session.scalars(select(Message).options(
selectinload(Message.chat)).filter_by(chat_id=chat_id).order_by(asc(Message.sent_at)))).all()
action_tool = similarity_search_api_desc("migrate_ibm_classic")
messages_obj[-1].json_metadata = json.dumps({
'searched_intent': action_tool,
'task_finished': False,
'stage': 1,
'history': []
})
metadata = json.loads(messages_obj[-1].json_metadata)
metadata['initial_query'] = standalone_query
logger.info(metadata["searched_intent"][0])
action_obj = Action(name=metadata["searched_intent"][0], metadata=json.dumps(metadata))
action_id = action_obj.id
dummy_message_content = "This action is performed by another bot. You can start with a fresh conversation or continue with a new context."
dummy_message = Message(
msg_type=Message.TYPE_ASSISTANT,
content=dummy_message_content,
msg_category=Message.TYPE_ACTION,
is_visible=False,
)
dummy_message.chat = chat
db_session.add(dummy_message)
messages_obj[-1].action_id = action_obj.id
db_session.add(action_obj)
chat_message.msg_category = Message.TYPE_ACTION
await db_session.commit()
EventSourceResponse({"event": "action", "data": json.dumps(action_obj.to_reference_json())})
return EventSourceResponse(
execute_stage_1(initial_query=standalone_query, user_dict=user,
chat_id=chat_id, action_id=action_id, cloud_id=cloud_account_id,
cloud_type=cloud_type))
else:
return EventSourceResponse(execute_qna(question=conv_req.question,
chat_id=chat_id,
user_name=user["name"]),
media_type="text/event-stream")
else:
logger.info("hello")
if response_content:
return EventSourceResponse(
user_msg_event_generator(response_for_user=response_content,
chat_id=chat_id
))
@whisper_chats.get("")
async def list_chats(
user=Depends(deps.authenticate_user),
start: conint(ge=1) = 1,
limit: conint(ge=1, le=settings.pagination_config.MAX_PAGE_LIMIT) = settings.pagination_config.DEFAULT_LIMIT,
):
from app.models.chat import Chat, Message
kwargs = {"profile_id": user["uuid"], "is_visible": True}
async with db_deps.get_db_session_async_context() as db_session:
total = await db_session.scalar(select(func.count(Chat.id)).filter_by(**kwargs))
if not total:
raise HTTPException(status_code=204, detail=f"No chats found having User ID: '{user['uuid']}'")
pages = ceil(total / limit)
if start > pages:
start = 1
offset = (start - 1) * limit
chats = (await db_session.scalars(
select(Chat)
.filter_by(**kwargs)
.order_by(desc(Chat.created_at))
.options(
joinedload(Chat.profile),
selectinload(Chat.messages).joinedload(Message.action)
)
.offset(offset)
.limit(limit)
)).all()
return {
"items": [chat.to_reference_json() for chat in chats],
"previous": start - 1 if start > 1 else None,
"next": start + 1 if start < pages else None,
"pages": pages,
"total": total
}
async def update_chat_title(
chat_id: str, title: str
):
from app.models.chat import Chat
try:
if len(title) > 80:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail={"error": "Chat 'title' length should be less than 80 chars."},
)
async with db_deps.get_db_session_async_context() as db_session:
chat_query = await db_session.execute(
update(Chat)
.where(Chat.id == chat_id)
.values(title=title)
.returning(Chat)
)
updated_chat = chat_query.scalars().first()
await db_session.commit()
except ValueError:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail={"error": "Unexpected error occurred."},
)
else:
if not updated_chat:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail={"error": f"No Chat found with id '{chat_id}'"}
)
return updated_chat
@whisper_chats.patch("/{chat_id}")
async def update_chat(
chat_id: str,
update_req: UpdateChatRequest,
user=Depends(deps.authenticate_user)
):
from app.models.chat import Chat
updated_chat = None
update_values = {}
if isinstance(update_req.is_visible, bool):
update_values['is_visible'] = update_req.is_visible
if update_req.metadata:
update_values['metadata_'] = json.dumps(update_req.metadata)
async with db_deps.get_db_session_async_context() as db_session:
if update_values:
query = await db_session.execute(
update(Chat)
.where(Chat.id == chat_id)
.values(**update_values)
.returning(Chat)
)
updated_chat = query.scalars().first()
await db_session.commit()
if update_req.title and update_req.title != "":
updated_chat = await update_chat_title(chat_id, update_req.title)
return updated_chat
@whisper_chats.get("/{chat_id}")
async def chat_history(
chat_id: str,
user=Depends(deps.authenticate_user),
start: conint(ge=0) = 1,
limit: conint(ge=0, le=settings.pagination_config.MAX_PAGE_LIMIT) = settings.pagination_config.DEFAULT_LIMIT,
):
from app.models.chat import Chat, Message
async with db_deps.get_db_session_async_context() as db_session:
chat_result = await db_session.scalar(
select(Chat)
.options(
selectinload(Chat.profile),
selectinload(Chat.messages.and_(Message.is_visible)).selectinload(Message.action)
)
.where(Chat.id == chat_id)
)
if not chat_result:
raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail={"error": f"No Chat found with id '{chat_id}'"})
if not chat_result.messages:
raise HTTPException(
status_code=HTTPStatus.NO_CONTENT, detail={"error": f"No Chat history found with id '{chat_id}'"}
)
visible_messages = [message for message in chat_result.messages if message.is_visible]
total = len(visible_messages)
if not total:
raise HTTPException(status_code=204, detail=f"No messages found having Chat ID: '{chat_id}'")
reversed_messages = list(reversed(visible_messages))
pages = ceil(total / limit)
if start > pages:
start = 1
paginated_messages = reversed_messages[(start - 1) * limit:start * limit]
paginated_messages.reverse()
chat_data = {
"id": chat_result.id,
"is_visible": chat_result.is_visible,
"title": chat_result.title,
"type": chat_result.chat_type,
"created_at": chat_result.created_at,
"profile_id": chat_result.profile_id,
"user": {
"id": chat_result.profile.id,
"email": chat_result.profile.email,
"is_admin": chat_result.profile.is_admin,
"name": chat_result.profile.name,
"project_id": chat_result.profile.project_id,
"user_id": chat_result.profile.user_id,
"api_key": chat_result.profile.api_key
},
"messages": [
{
"id": message.id,
"sent_at": message.sent_at,
"text": message.content,
"type": message.type,
"msg_type": message.msg_category,
"chat_id": message.chat_id,
"action_id": message.action_id,
"is_visible": message.is_visible,
"action_name": message.action.name if message.action else None
} for message in paginated_messages
]
}
return {
"items": chat_data["messages"],
"previous": start - 1 if start > 1 else None,
"next": start + 1 if start < pages else None,
"pages": pages,
"total": total
}
@whisper_chats.delete("/{chat_id}/messages/{message_id}", status_code=HTTPStatus.NO_CONTENT)
async def delete_message(
chat_id: str,
message_id: str,
user=Depends(deps.authenticate_user)
):
from app.models.chat import Chat, Message
async with db_deps.get_db_session_async_context() as db_session:
chat_query = await db_session.execute(
select(Chat)
.options(joinedload(Chat.messages))
.where(Chat.id == chat_id)
)
conversation = chat_query.unique().scalar_one_or_none()
if not conversation:
raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail={"error": f"No Chat found with id '{chat_id}'"})
deleted_msg_result = await db_session.execute(
select(Message)
.where(Message.id == message_id)
.where(Message.chat_id == chat_id)
)
deleted_msg = deleted_msg_result.scalar_one_or_none()
if not deleted_msg:
raise HTTPException(
status_code=HTTPStatus.NO_CONTENT, detail={"error": f"No Message found with id '{message_id}'."}
)
await db_session.delete(deleted_msg)
await db_session.commit()
@whisper_chats.delete("/{chat_id}")
async def delete_chat(
chat_id: str,
user=Depends(deps.authenticate_user)
):
from app.models.chat import Chat
async with db_deps.get_db_session_async_context() as db_session:
chat_result = await db_session.execute(
select(Chat)
.options(selectinload(Chat.messages))
.where(Chat.id == chat_id)
)
chat = chat_result.scalar_one_or_none()
if not chat:
raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=f"No Chat found having ID: '{chat_id}'")
await db_session.delete(chat)
await db_session.commit()
@whisper_chats.get("/actions/{action_id}", name="List messages against an Action")
async def list_action_messages(
action_id,
user=Depends(deps.authenticate_user)
):
from app.models.chat import Action
async with db_deps.get_db_session_async_context() as db_session:
query = await db_session.execute(select(Action).filter_by(id=action_id).options(selectinload(Action.messages)))
actions = query.scalar_one_or_none()
if not actions:
raise HTTPException(status_code=404, detail="Action not found")
messages = actions.messages
messages = messages[1:] if messages else []
actions.messages = messages
return actions.to_json()
|
CloudWhisperCustomBot | app/web/clouds/schemas.py | from pydantic import BaseModel
from typing import List, Optional
class CloudAccount(BaseModel):
id: str
name: str
class CloudAccountsResponse(BaseModel):
ibm_cloud_accounts: Optional[List[CloudAccount]] = []
ibm_softlayer_cloud_accounts: Optional[List[CloudAccount]] = []
aws_cloud_accounts: Optional[List[CloudAccount]] = []
gcp_cloud_accounts: Optional[List[CloudAccount]] = []
azure_cloud_accounts: Optional[List[CloudAccount]] = []
vmware_cloud_accounts: Optional[List[CloudAccount]] = []
|
CloudWhisperCustomBot | app/web/clouds/__init__.py | from .api import whisper_clouds
__all__ = ["whisper_clouds"]
|
CloudWhisperCustomBot | app/web/clouds/api.py | import requests
from fastapi import APIRouter, Depends, HTTPException
from fastapi.security import HTTPAuthorizationCredentials
from app.core.config import settings
from app.web.clouds.schemas import CloudAccountsResponse, CloudAccount
from app.web.common import deps
from app.web.common.deps import security
whisper_clouds = APIRouter()
def fetch_all_pages(base_url, headers, per_page=5):
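# Fetches every page of a paginated VPC+ API listing, following 'next_page' until it is absent; a 204 response ends iteration early.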
results = []
page = 1
while True:
url = f"{base_url}?page={page}&per_page={per_page}"
response = requests.get(url, headers=headers)
if response.status_code == 204:
break
response.raise_for_status()
data = response.json()
results.extend(data.get('items', []))
next_page = data.get('next_page', None)
if not next_page:
break
page += 1
return results
@whisper_clouds.get("", response_model=CloudAccountsResponse)
async def get_cloud_accounts(
user=Depends(deps.authenticate_user),
authorization: HTTPAuthorizationCredentials = Depends(security)
):
IBM_VPC_CLOUD_URL = settings.web.AUTH_LINK + "/v1/ibm/clouds"
IBM_SOFTLAYER_CLOUD_URL = settings.web.AUTH_LINK + "/v1/softlayer/accounts"
headers = {'Authorization': f"Bearer {authorization.credentials}"}
try:
ibm_vpc_cloud_accounts_data = fetch_all_pages(IBM_VPC_CLOUD_URL, headers)
ibm_vpc_cloud_accounts = [
CloudAccount(id=account["id"], name=account["name"])
for account in ibm_vpc_cloud_accounts_data
]
except requests.exceptions.RequestException as e:
raise HTTPException(status_code=500, detail=f"Error fetching IBM VPC cloud accounts: {str(e)}")
try:
ibm_softlayer_cloud_accounts_data = fetch_all_pages(IBM_SOFTLAYER_CLOUD_URL, headers)
ibm_softlayer_cloud_accounts = [
CloudAccount(id=account["id"], name=account["name"])
for account in ibm_softlayer_cloud_accounts_data
]
except requests.exceptions.RequestException as e:
raise HTTPException(status_code=500, detail=f"Error fetching IBM SoftLayer cloud accounts: {str(e)}")
return CloudAccountsResponse(
ibm_cloud_accounts=ibm_vpc_cloud_accounts,
ibm_softlayer_cloud_accounts=ibm_softlayer_cloud_accounts
)
|
CloudWhisperCustomBot | app/web/websockets/schemas.py | import typing as t
from pydantic import BaseModel, Field
class MessageRequest(BaseModel):
text: str
type: t.Optional[str] = "Human"
class ChatRequest(BaseModel):
question: str
chat_id: t.Optional[str] = Field(default=None, max_length=32, min_length=32, regex="^[0-9a-fA-F]+$")
action_id: t.Optional[str] = Field(default=None, max_length=32, min_length=32, regex="^[0-9a-fA-F]+$")
regenerate: t.Optional[bool] = Field(default=False)
|
CloudWhisperCustomBot | app/web/websockets/__init__.py | from .api import websockets_chats
__all__ = ["websockets_chats"]
|
CloudWhisperCustomBot | app/web/websockets/api.py | import json
import time
from fastapi import APIRouter, Depends, HTTPException, WebSocket, WebSocketException, WebSocketDisconnect
from fastapi.security import HTTPAuthorizationCredentials
from loguru import logger
from sqlalchemy import select, desc
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from sse_starlette import EventSourceResponse
from app.core.config import settings, APIKeyStatusConfig
from app.models.profile import Profile
from app.web.common import db_deps, deps
from app.web.common.chats_websockets_utils import process_routing, execute_qna, narrow_down_intent, execute_stage_1, \
get_information_from_db, check_cloud_account_status
from app.web.common.cloud_setup_instruction_messages import API_KEY_MESSAGE
from app.web.common.deps import security
from app.web.common.utils import api_key_msg_event_generator
from app.web.websockets.schemas import ChatRequest
websockets_chats = APIRouter()
@websockets_chats.websocket("/whisper-inference")
async def chat_api(
websocket: WebSocket,
authorization: HTTPAuthorizationCredentials = Depends(security),
db_session: AsyncSession = Depends(db_deps.get_db_session_async)
):
# connection closed exception handled
try:
await websocket.accept()
user = await deps.first_message_handler(websocket)
while True:
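# Each subsequent frame is expected to be a JSON-encoded ChatRequest; streamed responses are sent back to the client as JSON text chunks.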
data = await websocket.receive_text()
json_data = json.loads(data)
try:
# schema validation
# Parse the received JSON into Pydantic model
conv_req = ChatRequest(**json_data)
from app.models.chat import Chat, Message, Action
start = time.time()
chat_id, action_id, regenerate = conv_req.chat_id, conv_req.action_id, conv_req.regenerate
logger.info(f"Provided question: {conv_req}")
if chat_id:
logger.info(f"********************action id**********************: {action_id}")
chat = ((await db_session.scalars(
select(Chat).filter_by(id=chat_id).options(selectinload(Chat.messages)))).
one_or_none())
logger.info(f"chat: {chat}")
if regenerate:
query = select(Message).options(
selectinload(Message.action) if action_id else selectinload(Message.chat)
).order_by(desc(Message.sent_at))
if action_id:
query = query.filter_by(action_id=action_id)
else:
query = query.filter_by(chat_id=chat_id)
chat_message = (await db_session.scalars(query)).first()
if chat_message and chat_message.type == Message.TYPE_HUMAN:
conv_req.question = chat_message.content
else:
if action_id:
action = (await db_session.scalars(select(Action).filter_by(id=action_id))).one_or_none()
logger.info(f"Action {action.id}")
if not action:
raise HTTPException(status_code=404, detail="Action not found")
chat_message = Message(msg_type=Message.TYPE_HUMAN, content=conv_req.question,
msg_category=Message.TYPE_ACTION)
logger.info(chat_message.id)
chat_message.action = action
db_session.add(chat_message)
await db_session.commit()
else:
chat_message = Message(msg_type=Message.TYPE_HUMAN, content=conv_req.question,
msg_category=Message.TYPE_ACTION)
chat_message.chat = chat
db_session.add(chat_message)
await db_session.commit()
else:
logger.info("Creating new chat")
profile = (
await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
chat = Chat(title=conv_req.question[:80], chat_type=Chat.TYPE_QNA)
chat_message = Message(msg_type=Message.TYPE_HUMAN, content=conv_req.question,
msg_category=Message.TYPE_QNA)
chat_id, chat.profile, chat_message.chat = chat.id, profile, chat
db_session.add(chat)
db_session.add(chat_message)
await db_session.commit()
if action_id: # if action id exists then it means that it's on stage one
profile = (
await db_session.scalars(select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
if not profile.api_key or profile.api_key_status == APIKeyStatusConfig.STATUS_INVALID:
return EventSourceResponse(api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"),
chat_id=conv_req.chat_id, db_client=db_session))
api_endpoint = "/v1/ibm/clouds"
tool = "Action"
cloud_account_check_response = await check_cloud_account_status(db_session, chat_id, api_endpoint,
tool, authorization=authorization)
if cloud_account_check_response:
return cloud_account_check_response
action = conv_req.question
logger.info(f"stage1 question: {action}")
chat_message.msg_category = Message.TYPE_ACTION
await db_session.commit()
async for chunk in execute_stage_1(initial_query=action, db_client=db_session, user_dict=user,
chat_id=chat_id, action_id=action_id):
json_chunk = json.dumps(chunk)
await websocket.send_text(json_chunk)
else:
standalone_query, tool = await process_routing(chat_id=chat_id, db_client=db_session)
end = time.time()
logger.info(
f'tool from process_routing is {tool}\nTime it took {end - start}s\nUser query: {standalone_query}')
if tool == 'QnA':
async for chunk in execute_qna(chat_id=chat_id, db_client=db_session):
json_chunk = json.dumps(chunk)
await websocket.send_text(json_chunk)
elif tool == 'Action':
profile = (await db_session.scalars(
select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
if not profile.api_key or profile.api_key_status == APIKeyStatusConfig.STATUS_INVALID:
async for chunk in api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"),
chat_id=chat_id, db_client=db_session):
json_chunk = json.dumps(chunk)
await websocket.send_text(json_chunk)
api_endpoint = "/v1/ibm/clouds"
tool = "Action"
cloud_account_check_response = await check_cloud_account_status(db_session, chat_id,
api_endpoint, tool,
authorization=authorization)
if cloud_account_check_response:
return cloud_account_check_response
async for chunk in narrow_down_intent(chat_id=chat_id, db_client=db_session, user_dict=user,
standalone_query=standalone_query, action_id=action_id):
json_chunk = json.dumps(chunk)
await websocket.send_text(json_chunk)
elif tool == 'InformationRetrieval':
profile = (await db_session.scalars(
select(Profile).filter(Profile.user_id == user["id"]))).one_or_none()
if not profile.api_key or profile.api_key_status == APIKeyStatusConfig.STATUS_INVALID:
async for chunk in api_key_msg_event_generator(
api_key_msg=API_KEY_MESSAGE.format(vpcplus_url=f"{settings.web.AUTH_LINK}/settings",
cloud_whisper_url=f"{settings.web.BACKEND_URI}/settings"),
chat_id=chat_id, db_client=db_session):
json_chunk = json.dumps(chunk)
await websocket.send_text(json_chunk)
api_endpoint = "/v1/ibm/clouds"
tool = "InformationRetrieval"
cloud_account_check_response = await check_cloud_account_status(db_session, chat_id,
api_endpoint, tool,
authorization=authorization)
if cloud_account_check_response:
return cloud_account_check_response
async for chunk in get_information_from_db(
question=conv_req.question,
chat_id=chat_id,
db_client=db_session,
user_id=user["id"]
):
json_chunk = json.dumps(chunk)
await websocket.send_text(json_chunk)
except ValueError as e:
# Handle validation error
await websocket.send_text(json.dumps({"event": "error", "data": f"Invalid payload: {e}"}))
except WebSocketDisconnect as e:
reasons = {
1000: "Normal closure: Connection closed as expected.",
1001: "Going away: The server is shutting down or client navigated away."
}
reason = reasons.get(e.code, f"Unknown reason: Code {e.code}")
raise WebSocketException(code=e.code, reason=reason)
|
CloudWhisperCustomBot | app/whisper/consts.py | WHISPER_USER_ROLE = 'user'
WHISPER_ASSISTANT_ROLE = 'assistant'
|
CloudWhisperCustomBot | app/whisper/utils/config_reader.py | import json
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_DIR = os.path.join(BASE_DIR, 'utils')
class ConfigLoader:
def __init__(self, intent):
self.validation_config = {}
self.dependencies_config = {}
self.intent = intent
@staticmethod
def _load_from_file(file_path):
with open(file_path, 'r') as file:
return json.load(file)
def load_validation_config(self):
validation_config_fp = "app/whisper/flow/phases/action/config/validation_criteria.json"
self.validation_config = ConfigLoader._load_from_file(validation_config_fp)
return self.validation_config
def load_dependencies_config(self):
dependencies_config_fp = "app/whisper/flow/phases/action/config/dependencies.json"
self.dependencies_config = ConfigLoader._load_from_file(dependencies_config_fp)
return self.dependencies_config[self.intent]
def load_configs(self):
return self.load_validation_config(), self.load_dependencies_config()
|
CloudWhisperCustomBot | app/whisper/utils/prompt.py | DB_RESULT_FORMAT_PROMPT = """
You are an expert database analyst AI tasked with formatting database results into clear, readable markdown tables while preserving any accompanying text. Your goal is to present the information in a way that's easy for users to understand and analyze.
Here is the database result you need to process:
------------------
{content}
------------------
Please follow these steps to format the database result:
1. Analyze the content:
Break down the data structure, identify key information, and plan how to organize it into one or more tables. Consider the following:
- How many distinct groups or categories are in the data?
- What columns should be included in each table?
- Are there any patterns in the data that should be highlighted?
- Are there any potential formatting issues or data inconsistencies?
- What is the best alignment for each column based on its data type (e.g., left for text, right for numbers)?
- Is there any text before or after the database result that should be preserved?
- Are there any empty or null values that need special handling?
2. Create the table(s):
Based on your analysis, create one or more markdown tables with the following guidelines:
- Use | to separate columns
- Use - to create the header row
- Align columns using : in the header row (e.g., :--- for left alignment, :---: for center alignment, ---: for right alignment)
- Ensure the "Name" column (if present) is always the first column
- Do not include an "ID" column
- Here's a comprehensive example of a correctly formatted markdown table:
| Name | Age | Occupation | Salary |
|:------------|----:|:-----------|:---------|
| John Doe | 32 | Engineer | $75,000 |
| Jane Smith | 28 | Designer | $65,000 |
| Bob Johnson | 45 | Manager | $100,000 |
| Alice Brown | 39 | Analyst | $80,000 |
3. Format the data:
Format the data within the table(s):
- Present dates in a human-readable format (e.g., YYYY-MM-DD)
- Ensure all relevant information from the original result is included in the table(s)
- Do not add any information that wasn't in the original result
- Handle any empty or null values consistently (e.g., use "N/A" or "-")
4. Final checks:
Verify the following:
- All tables are properly formatted with correct markdown syntax
- No information has been lost or added
- A new line has been added after each table if there are multiple tables
- Any text before or after the database result has been preserved
- Column alignments are appropriate for the data types
5. Output Formatted Result:
Present your final formatted result, including:
- Any text that appeared before the database result
- The properly formatted markdown table(s)
- Any text that appeared after the database result
Remember:
- If the database result doesn't contain structured data suitable for a table, provide the original result without any modifications after Output Formatted Result:
- Pay special attention to the markdown formatting of the tables. Ensure that the | and - characters are used correctly to create valid markdown tables that will be parsed properly.
- Do not include phrases like "Here is the database result formatted into a table using Markdown format:" or any similar introductory text.
- Do not provide explanations about how you formatted the result or any comments after the table(s), unless they were present in the original content.
- Always generate the Output Formatted Result: section, since the user cannot see the result unless it appears after Output Formatted Result:.
Please provide your response in the following format:
Analyze the content: [Enter your analysis of the content]
Create the table(s): [Create the table at this stage]
Format the data: [Format the data inside the table]
Final checks: [perform some final checks]
Output Formatted Result: [\n\n Provided correctly formatted markdown table and preserved text from original database result here]
"""
PAYLOAD_RESULT_FORMAT_PROMPT = """
You are tasked with generating a formatted message that improves readability and understanding for the user. The message will contain payload information and a review message.
You will be provided with payload information and a review message.
To format this payload and improve readability and understanding for the user, follow these steps:
1. Start with a brief introduction or title for the message.
2. List the payload information in a bulleted or numbered format.
3. Present the review message as a separate paragraph after the payload information.
4. End with a question asking the user to confirm if the information is correct.
5. Provide response options for the user (e.g., 'yes' or 'no').
6. If dates are present, format them so they are human-readable.
7. Don't add ANY comments other than the database result
Example Input 1:
Description: Backup of techx-vpc
VPC Name: techx-vpc
Is this information correct? Please respond with 'yes' if everything looks good, or 'no' if you need to make any changes.
Example Output 1:
Kindly review the details:
- **Description:** Backup of techx-vpc
- **VPC Name:** techx-vpc
Is this information correct? Please respond with 'yes' if everything looks good, or 'no' if you need to make any changes.
Follow this structured output format:
Kindly review the details:
Payload Information:
- **Item1:** Value
- **Item2:** Value
Is this information correct? Please respond with 'yes' if everything looks good, or 'no' if you need to make any changes.
Using the provided payload and review message, generate a formatted message following the structure outlined above. Ensure that all information from the inputs is included in your formatted message.
"""
|
CloudWhisperCustomBot | app/whisper/utils/__init__.py | from .config_reader import ConfigLoader
__all__ = ['ConfigLoader']
|
CloudWhisperCustomBot | app/whisper/utils/json_loader.py | import json
from pathlib import Path
from typing import List, Optional, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class JSONLoader(BaseLoader):
def __init__(
self,
file_path: Union[str, Path],
content_key: Optional[str] = None,
):
self.file_path = Path(file_path).resolve()
self._content_key = content_key
def load(self) -> List[Document]:
"""Load and return documents from the JSON file."""
docs = []
# Load JSON file
with open(self.file_path) as file:
data = json.load(file)
        # Create one Document per top-level action key
for action in data.keys():
docs.append(Document(page_content=action, metadata={'method': data[action]}))
return docs
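
# Illustrative usage (not part of the original module). The file name below is hypothetical;
# the loader expects a JSON object whose top-level keys are action names and whose values are
# the corresponding method definitions.
if __name__ == "__main__":
    loader = JSONLoader("actions.json")
    for doc in loader.load():
        # page_content is the action name; metadata carries the action's method definition
        print(doc.page_content, doc.metadata["method"])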
|
CloudWhisperCustomBot | app/whisper/utils/pagination_utils.py | import re
from loguru import logger
def generate_count_query(cypher_query):
# Regular expression to match full MATCH or OPTIONAL MATCH clauses, including node patterns and conditions
match_pattern = re.compile(r'(OPTIONAL\s+MATCH|MATCH)\s+\([^\)]+\)[^\n\r]*(?:\s*WHERE[^\n\r]*)?', re.IGNORECASE)
# Remove the RETURN clause if it exists
cypher_query_without_return = re.sub(r'\s*RETURN[^\n\r]*', '', cypher_query, flags=re.IGNORECASE)
    # Extract all MATCH or OPTIONAL MATCH clauses from the query
    match_clauses = [clause.group(0) for clause in match_pattern.finditer(cypher_query_without_return)]
    # Check if there are any MATCH clauses found
    if match_clauses:
        # Rebuild the count query from all MATCH clauses
        count_query = " ".join(match_clauses) + " RETURN COUNT(*) AS total_count"
    else:
        # If no MATCH clauses are found, return an error message
        return "Invalid query: No MATCH clause found."
return count_query
def calculate_resource_shown(cypher_query, default_increment=20):
# Regular expression to find SKIP in the query
skip_pattern = re.compile(r'\bSKIP\s+(\d+)', re.IGNORECASE)
# Search for SKIP value in the query
skip_match = skip_pattern.search(cypher_query)
if skip_match:
# Extract current SKIP value
current_skip = int(skip_match.group(1))
# Calculate resource_shown based on SKIP value
resource_shown = current_skip + default_increment
else:
# If SKIP is not found, default to 20 (first page)
resource_shown = default_increment
return resource_shown
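
# Illustrative usage (not part of the original module); run directly to see what the helpers
# produce for a typical paginated Cypher query.
if __name__ == "__main__":
    sample_query = "MATCH (n:Instance) RETURN n.name SKIP 20 LIMIT 20"
    # The RETURN clause is stripped and a COUNT(*) projection is appended to the MATCH clause(s)
    print(generate_count_query(sample_query))
    # SKIP 20 plus the default increment of 20 -> 40 resources shown so far
    print(calculate_resource_shown(sample_query))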
|
CloudWhisperCustomBot | app/whisper/utils/validators.py | import re
import uuid
import requests
from app.whisper.utils import ConfigLoader
alphanumeric_error = "'{value}' for the '{key}' attribute must be alphanumeric."
uuid_error = "'{value}' for the '{key}' attribute must be a valid UUID."
dependency_error_msg = "{dependencies} must be provided before '{key}'."
BASE_URL = "https://vpc-stage.wanclouds.net{endpoint}"
class JSONValidator:
def __init__(self, intent: str, headers: dict):
self.intent = intent
self.headers = headers
self.validation_config, self.dependencies = ConfigLoader().load_configs()
if not (self.validation_config and self.dependencies):
raise RuntimeError(f"Please add the '{self.intent}' in the validation_config.json and dependencies.json "
f"files.")
def is_alphanumeric(self, value):
return bool(re.match(r"^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$", value))
def is_valid_uuid(self, value):
try:
uuid_obj = uuid.UUID(value)
return str(uuid_obj) == value
except ValueError:
return False
def _fetch_resources(self, field, data):
endpoint = self.validation_config[field]["available_resources"]
if endpoint:
dependencies = self.dependencies.get(field)
if dependencies and isinstance(dependencies, list):
query_params = {dep: data.get(dep) for dep in dependencies if data.get(dep)}
query_string = '&'.join([f"{key}={value}" for key, value in query_params.items()])
endpoint += f"?{query_string}"
# Make the API call using the constructed endpoint
url = BASE_URL.format(endpoint=endpoint)
response = requests.get(url, headers=self.headers)
print("\n\n")
if response.status_code == 200:
return response.json()
return []
def validate_json(self, data):
errors = []
validation_functions = {
"is_alphanumeric": self.is_alphanumeric,
"is_valid_uuid": self.is_valid_uuid,
}
validation_functions_to_errors = {
"is_alphanumeric": alphanumeric_error,
"is_valid_uuid": self.is_valid_uuid,
}
if "json" in data:
data = data["json"]
for key, value in data.items():
if value == "<missing>":
continue
if isinstance(value, dict) and len(value.keys()) > 2:
return self.validate_json(value)
field_config = self.validation_config.get(key)
print("--------------------- validate_json --------------------")
print(f"{key} - {field_config}")
print("--------------------- validate_json --------------------")
if field_config:
# TODO: change this
dependencies = self.dependencies.get(key, [])
if dependencies:
missing_dependencies = [dep for dep in dependencies if data.get(dep) == "<missing>"]
if missing_dependencies:
errors.append(
                            dependency_error_msg.format(key=key, dependencies=', '.join(missing_dependencies))
)
continue
validation_fn_name = field_config.get("validation_fn")
if validation_fn_name and validation_fn_name in validation_functions:
validation_fn = validation_functions[validation_fn_name]
validation_error = validation_functions_to_errors[validation_fn_name]
if not validation_fn(value):
errors.append(validation_error.format(key=key, value=value))
if dependencies:
for dep in dependencies:
if data.get(dep) == "<missing>":
errors.append(f"Validation failed for '{key}'. Dependency '{dep}' is missing.")
break
# if dependencies:
if "available_resources" in field_config:
fetched_resources = self._fetch_resources(key, data)
if isinstance(fetched_resources, dict) and "items" in fetched_resources:
fetched_resources = fetched_resources["items"]
if not fetched_resources:
errors.append(
f"Validation failed for '{key}' with value '{value}'. No available resource found.")
continue
fetched_resources_names = []
for resource in fetched_resources:
if "resource_json" in resource:
fetched_resources_names.append(resource['resource_json']['name'])
else:
fetched_resources_names.append(resource['name'])
if value not in fetched_resources_names:
errors.append(f"No '{key}' with value '{value}' found. Please check the available '{key}'")
# Filter out errors for fields with value '<missing>'
errors = [error for error in errors if '<missing>' not in error]
return errors
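
# Usage sketch (illustrative, not part of the original module). The intent name, bearer token
# and payload below are hypothetical; validate_json returns a list of human-readable errors.
#
#   validator = JSONValidator(intent="create_vpc", headers={"Authorization": bearer_token})
#   errors = validator.validate_json({"json": {"name": "my-vpc", "region": "<missing>"}})
#   if errors:
#       print(errors)  # e.g. alphanumeric/UUID violations, unmet dependencies, unknown resources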
|
CloudWhisperCustomBot | app/whisper/utils/action_engine/consts.py | from enum import Enum
ibm_region_mapper = {
"dallas": "us-south",
"dallas tx": "us-south",
"dallas texas": "us-south",
"sydney": "au-syd",
"sydney au": "au-syd",
"sydney australia": "au-syd",
"london": "eu-gb",
"london uk": "eu-gb",
"london united kingdom": "eu-gb",
"frankfurt": "eu-de",
"frankfurt de": "eu-de",
"frankfurt germany": "eu-de",
"osaka": "jp-osa",
"osaka jp": "jp-osa",
"osaka japan": "jp-osa",
"tokyo": "jp-tok",
"tokyo jp": "jp-tok",
"tokyo japan": "jp-tok",
"sao paulo": "br-sao",
"sao paulo br": "br-sao",
"sao paulo brazil": "br-sao",
"toronto": "ca-tor",
"toronto ca": "ca-tor",
"toronto canada": "ca-tor",
"madrid": "eu-es",
"madrid es": "eu-es",
"madrid spain": "eu-es",
"washington dc": "us-east",
"washington": "us-east",
"dc": "us-east",
"d.c.": "us-east",
"dc.": "us-east",
"washington d.c.": "us-east",
"washington d.c": "us-east",
"washington usa": "us-east",
"washington us": "us-east",
"washington district of columbia": "us-east"
}
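
# Illustrative example (not part of the original module): the action engines use this map to
# normalize spoken region names inside a user query before it is sent to the db_tool, e.g.
#
#   import re
#   question = "list all instances in Frankfurt"
#   for alias, region in ibm_region_mapper.items():
#       pattern = r'(?<![\w-]){}(?![\w-])'.format(re.escape(alias))
#       question = re.sub(pattern, region, question, flags=re.IGNORECASE)
#   # question -> "list all instances in eu-de"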
confirmation_feedback = 'Before moving to the next tool, generate a detailed summary of the previous step \
and confirm it with the user before moving ahead.'
missing_keys_feedback = f'''The following keys are missing from the provided payload: {{missing_keys}} from the \
tool {{tool}}. Please fill out remaining fields first and recall the tool'''
incorrect_keys_feedback = f'''The following keys have incorrect 'id' values in the provided payload: {{incorrect_ids}} \
from the tool {{tool}}. Please fill out or correct the ids and then call the tool again'''
duplicate_ids_feedback = f'''
You have inadvertently added incorrect or duplicate IDs to the payload. Duplicate keys: {{duplicate_keys}} from the tool {{tool}}. Please follow these steps to correct the issue:
1. For each resource that requires an ID:
a. Use the db_tool to fetch the correct list of options.
b. Present these options to the user for selection.
c. Once the user selects an option, use the db_tool again to retrieve the corresponding ID.
2. Only include IDs in the payload that have been explicitly fetched from the db_tool and confirmed by user selection.
3. Do not add any IDs on your own without verification.
4. If you're unsure about an ID, leave it blank or use a placeholder like "TO_BE_FETCHED" instead of guessing.
After making these corrections, please call the tool again with the updated payload.
Remember: Always use the db_tool to fetch and verify IDs before including them in the payload.
'''
class MessageFormatting(Enum):
DISABLED = False
DATABASE = "DATABASE"
PAYLOAD = "PAYLOAD" |
CloudWhisperCustomBot | app/whisper/utils/action_engine/__init__.py | from app.whisper.utils.action_engine.complex_base import ComplexActionPhaseClaude
from app.whisper.utils.action_engine.base import ActionPhaseClaude
__all__ = ['ActionPhaseClaude', 'ComplexActionPhaseClaude']
|
CloudWhisperCustomBot | app/whisper/utils/action_engine/complex_base.py | import ast
import copy
import json
import re
import requests
from anthropic import APIConnectionError, RateLimitError, APIStatusError
from loguru import logger
from app.web.common.utils import update_activity_tracking
from app.whisper.consts import WHISPER_USER_ROLE
from app.whisper.llms.anthropic import AnthropicLLM
from app.whisper.utils.action_engine.consts import ibm_region_mapper, confirmation_feedback, missing_keys_feedback, \
incorrect_keys_feedback, duplicate_ids_feedback, MessageFormatting
from app.whisper.utils.action_engine.prompt import COMPLEX_SYSTEM as SYSTEM
from app.whisper.utils.neo4j.client import Neo4j
from app.whisper.utils.prompt import DB_RESULT_FORMAT_PROMPT, PAYLOAD_RESULT_FORMAT_PROMPT
RED = "\033[91m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
BLUE = "\033[94m"
MAGENTA = "\033[95m"
CYAN = "\033[96m"
WHITE = "\033[97m"
RESET = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
class ComplexActionPhaseClaude:
def __init__(self, intent: str, chat_history: list, bearer: str, user_id: str, metadata: dict, cloud_id):
self.last_tool = None
self.base_llm = AnthropicLLM()
self.metadata = metadata
self.base_llm.messages = chat_history # it adds messages in self.base_llm
self.endpoint = intent[-1]['method']['endpoint']
self.api_function_names = [tool["name"] for tool in intent[-1]['method']['tool']]
self.required_keys = {}
for tool in intent[-1]['method']['tool']:
api_function_schema_copy = copy.deepcopy(tool["input_schema"])
self.required_keys[tool["name"]] = self.extract_keys(api_function_schema_copy)
self.bearer = bearer
self.tools = self._get_tool_from_intent(intent)
self.user_id = user_id
self.tool_force = metadata.get('tool_force', False)
self.confirmation_stage = metadata.get("confirmation_stage", False)
self.tool_in_progress = metadata.get("tool_in_progress", None)
self.previous_tool_in_progress = metadata.get("previous_tool_in_progress", None)
self.tools_output = metadata.get("tools_output", dict())
self.payload = metadata.get("payload", {}) # combined payload
self.tools_confirmed = metadata.get('tools_confirmed', [])
self.schema = None
self.switch_tool = metadata.get('switch_tool', False)
self.switch_tool_id = metadata.get('switch_tool_id', None)
self.cloud_id = cloud_id
from app.main import app
self.neo4j_client = Neo4j(db_session=app.state.neo4j_session, user_id=self.user_id, cloud_id=self.cloud_id,
vpc=True)
self.schema = self.neo4j_client.schema
self.llm_chat_history = ''
for chat_ in self.base_llm.messages[-10:]:
logger.info(chat_) # don't add user latest query in chat history
self.llm_chat_history += \
f"<{chat_['role'].lower()}>: {json.dumps(chat_['content'])}</{chat_['role'].lower()}>\n"
self.final_msg_formatting = MessageFormatting.DISABLED.value
def get_metadata(self):
return {
'tool_force': False,
"tool_in_progress": self.tool_in_progress,
"previous_tool_in_progress": self.previous_tool_in_progress,
"tools_output": self.tools_output,
"confirmation_stage": self.confirmation_stage,
"payload": self.payload,
'tools_confirmed': self.tools_confirmed
}
    def extract_keys(self, schema, parent_key=None):
required_dict = {}
if isinstance(schema, dict):
for key, value in schema.items():
# Check if the current key is 'required' and the parent is not 'items'
if key == 'required' and isinstance(value, list) and parent_key != 'items':
if parent_key:
if parent_key in required_dict:
required_dict[parent_key].extend(value)
else:
required_dict[parent_key] = value
else:
required_dict[key] = value
if isinstance(value, dict):
nested_required_values = self.extract_keys(value, key)
for nested_key, nested_value in nested_required_values.items():
if nested_key in required_dict:
required_dict[nested_key].extend(nested_value)
else:
required_dict[nested_key] = nested_value
return required_dict
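
    # Illustrative note (not in the original source): for a tool input schema such as
    #   {"required": ["name"], "properties": {"resource_group": {"required": ["id"]}}}
    # extract_keys returns {"required": ["name"], "resource_group": ["id"]}, i.e. a mapping of
    # parent keys to the field names that must be present before the tool can be called.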
def keys_exist_in_schema(self, required_keys_dict, schema):
def check_key_in_schema(schema, key, parent_key=None):
if isinstance(schema, dict):
if parent_key:
if parent_key in schema and isinstance(schema[parent_key], dict) and key in schema[parent_key]:
if key == 'id' or 'id' in key:
id_value = schema[parent_key][key]
if id_value is not None: # Add this check
if not re.match(r'^[0-9a-f]{32}$', id_value):
return False, f"{parent_key}.{key}"
else:
return False, f"{parent_key}.{key}" # Return False for None values
return True, None
elif key in schema:
if key == 'id' or 'id' in key:
id_value = schema[key]
if id_value is not None: # Add this check
if not re.match(r'^[0-9a-f]{32}$', id_value):
return False, key
else:
return False, key # Return False for None values
return True, None
for sub_key, sub_value in schema.items():
if isinstance(sub_value, dict):
result, incorrect_key = check_key_in_schema(sub_value, key, parent_key)
if result:
return True, None
elif incorrect_key:
return False, incorrect_key
return False, None
missing_keys = []
incorrect_id_keys = []
for parent_key, keys in required_keys_dict.items():
if parent_key == 'required':
for key in keys:
if key not in schema:
missing_keys.append(key)
else:
for key in keys:
result, incorrect_key = check_key_in_schema(schema, key, parent_key)
if not result:
if incorrect_key:
incorrect_id_keys.append(incorrect_key)
else:
missing_keys.append(f"{parent_key}.{key}" if parent_key else key)
if missing_keys or incorrect_id_keys:
return False, missing_keys, incorrect_id_keys
return True, None, None
def check_duplicate_ids(self, schema):
ids = {}
duplicate_keys = []
def collect_ids(obj, path=""):
if isinstance(obj, dict):
for key, value in obj.items():
current_path = f"{path}.{key}" if path else key
if key == 'id':
if value in ids:
ids[value].append(current_path)
duplicate_keys.append(current_path)
if len(ids[value]) >= 3: # Check for 3 or more duplicates
return True
elif len(ids[value]) == 2:
# Check if both parent keys contain 'cloud'
if not all('cloud' in part.lower() for part in [ids[value][0], current_path]):
return True
else:
ids[value] = [current_path]
# Check if 'abcd' is in the ID
if isinstance(value, str) and 'abcd' in value:
duplicate_keys.append(current_path)
return True
elif isinstance(value, (dict, list)):
if collect_ids(value, current_path):
return True
elif isinstance(obj, list):
for index, item in enumerate(obj):
current_path = f"{path}[{index}]"
if collect_ids(item, current_path):
return True
return False
has_duplicates = collect_ids(schema)
return has_duplicates, duplicate_keys
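
    # Illustrative note (not in the original source): check_duplicate_ids flags payloads where
    # the same 32-character id appears under multiple non-"cloud" keys, or where a placeholder
    # id containing "abcd" was invented, e.g.
    #   self.check_duplicate_ids({"vpc": {"id": "a1" * 16}, "subnet": {"id": "a1" * 16}})
    #   -> (True, ["subnet.id"])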
def update_nested_dict(self, original, updates):
if isinstance(original, dict) and isinstance(updates, dict):
for key, value in updates.items():
if key in original:
if isinstance(value, (dict, list)) and isinstance(original[key], (dict, list)):
self.update_nested_dict(original[key], value)
else:
original[key] = value
else:
original[key] = value
elif isinstance(original, list) and isinstance(updates, list):
for i, value in enumerate(updates):
if i < len(original):
if isinstance(value, (dict, list)) and isinstance(original[i], (dict, list)):
self.update_nested_dict(original[i], value)
else:
original[i] = value
else:
original.append(value)
return original
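
    # Illustrative note (not in the original source): update_nested_dict deep-merges the update
    # into the original, e.g.
    #   self.update_nested_dict({"a": {"x": 1}}, {"a": {"y": 2}})  ->  {"a": {"x": 1, "y": 2}}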
def check_empty_pairs(self, input_dict):
# Check if the input is a dictionary
if not isinstance(input_dict, dict):
return False
for key, value in input_dict.items():
# Check if the key is empty
if not key:
return True
# Check if the value is empty
if not value:
return True
# If the value is a dictionary, check it recursively
if isinstance(value, dict):
if self.check_empty_pairs(value):
return True
return False
def _get_tool_from_intent(self, intent):
tools = intent[-1]['method']['tool'] # Assumes intent[-1] is the highest priority
db_tool = {
"name": "db_tool",
"description": "This is a helper tool. It assists in finding and validating information from databases. "
"Use this as much as possible to fill out parameters of the tool function'."
" Find out information from Knowledge graph database.",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'What are the names of all the companies'"},
"Tool_for_which_collecting_parameter": {
"type": "string",
"description": "step for which collecting parameters"}
},
"required": [
"query",
"Tool_for_which_collecting_parameter"
]
}
}
pagination_db_tool = {
"name": "db_tool_for_pagination",
"description": """This is a helper tool. It helps to fetch next page of a requested resources. Analyze the conversation history carefully before generating a query. Consider the following points:
1. Pagination: If this is a follow-up request, determine which page of results the user needs next.
2. Specificity: Take a close look at chat_history and create a precise query that reflects the user's current request by determining how many records have already been shown so they can be skipped. Avoid generic 'show all' queries.
3. Context: Incorporate any relevant context from previous interactions.
4. Limits: Remember that the db_tool can't fetch all records at once. Specify a reasonable LIMIT (e.g., 20 items per page)
5. Request for listing all items of a resource: Review your history; if the user is requesting all items of a resource whose count is greater than 20, then avoid creating such a query for db_tool.""",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'Fetch the next 20 [resource_type] records skipping the numbers of records which are already shown"}
}
}
}
if "db_tool" not in [tool['name'] for tool in tools]:
tools.append(db_tool)
if "db_tool_for_pagination" not in [tool['name'] for tool in tools]:
tools.append(pagination_db_tool)
return tools
async def start(self, query: str, chat_id=None, action_id=None):
response = await self.get_response_from_llm(query, chat_id, action_id)
content = await self.process_response(response)
if self.final_msg_formatting == MessageFormatting.DATABASE.value:
content = self.format_response(content)
return content
async def execute_api(self, payload, chat_id=None, action_id=None):
args = json.loads(json.dumps(payload))
logger.debug(f"Finalized ARGS created by LLM: {json.dumps(args, indent=2)}")
if args.get('path_params'):
for key, val in args["path_params"].items():
self.endpoint = self.endpoint.replace('{' + key + '}', val)
temp = dict()
for key, val in args.items():
if key != 'path_params':
temp[key] = val
args = temp
logger.debug("Preparing to make POST request to API")
logger.debug(f"Endpoint: {self.endpoint}")
logger.debug(f"Request body: {json.dumps(args, indent=2)}")
call_response = requests.post(
self.endpoint,
json=args,
headers={"Authorization": self.bearer}
)
response_data = call_response.json()
response_data.pop('fe_request_data', None)
response_data.pop('associated_roots', None)
call_response_for_llm = (
f"Status-code: {call_response.status_code}\n"
f"Response: {json.dumps(response_data, indent=2)} \ninform"
f" the user that you will receive an email notification once this action is completed."
)
if call_response.status_code in [202, 200]:
logger.info("************* Updating Activity Tracking ******************")
await update_activity_tracking(call_response.text, chat_id, action_id, self.user_id)
logger.debug('#' * 20)
logger.debug(f"API call response: {call_response_for_llm}")
logger.debug('#' * 20)
return call_response_for_llm
async def get_response_from_llm(self, query=None, chat_id=None, action_id=None):
if query:
logger.info(f"ADDING USER QUERY -> role='user', content={query}")
self.process_user_query(query=query)
try:
logger.info('TOOL NAMES')
logger.info([tool['name'] for tool in self.tools])
logger.info(self.tools[0])
logger.info(self.base_llm.messages)
if self.metadata.get("confirmation_stage"):
confirmation_tool = {
"name": "confirmation_tool",
"description": "This tool reviews users latest reply to confirmation of the values selected by a tool that has recently completed the payload. If the user confirms or agrees with the choices in the summary displayed, the Confirmation is true, but if user doesn't approve then summary then Confirmation is false."
"else False",
"input_schema": {
"type": "object",
"properties": {
"Confirmation": {
"type": "boolean",
"description": "Confirmation flag that is only True when user confirms the details and is ready to move forward. Analyze user last response. Don't get hallucinated by history"
}
},
"required": [
"Confirmation"
]
}
}
                confirmation_tools = self.tools
                if confirmation_tool['name'] not in [tool['name'] for tool in confirmation_tools]:
                    confirmation_tools.append(confirmation_tool)
                chat_response = self.base_llm.process(tools=confirmation_tools,
system=SYSTEM.format(schema=self.schema),
force_tool=True,
tool_name="confirmation_tool")
confirmation = chat_response['content'][-1]['input']["Confirmation"]
logger.info(confirmation)
if confirmation:
self.payload = self.update_nested_dict(self.payload, self.tools_output[self.tool_in_progress])
                    if self.tool_in_progress not in self.tools_confirmed:
                        self.tools_confirmed.append(self.tool_in_progress)
self.metadata["tools_confirmed"] = self.tools_confirmed
self.confirmation_stage = False
self.metadata['confirmation_stage'] = False
self.tool_in_progress = None
self.previous_tool_in_progress = None
self.tool_force = False
logger.info(f"TOOL NAMES PASSED IN CONFIRMATION -> {[tool['name'] for tool in self.tools]}")
if (len(self.tools) - 2) == len(self.tools_confirmed):
if 'Confirmation' in self.payload:
del self.payload['Confirmation']
response = await self.execute_api(self.payload, chat_id, action_id)
self.base_llm.messages = self.base_llm.messages[:-1]
chat_response = await self.get_response_from_llm(query=response)
else:
chat_response = await self.get_response_from_llm()
else:
chat_response = self.base_llm.process(tools=self.tools, system=SYSTEM.format(schema=self.schema),
force_tool=self.tool_force,
tool_name=self.tool_in_progress if self.tool_in_progress else None)
elif self.tool_force:
chat_response = self.base_llm.process(tools=self.tools, system=SYSTEM.format(schema=self.schema),
force_tool=self.tool_force,
tool_name=self.tool_in_progress if self.tool_in_progress else None)
self.tool_force = False
elif len(self.base_llm.messages) == 1:
chat_response = self.base_llm.process(tools=self.tools, system=SYSTEM.format(schema=self.schema),
force_tool=True,
tool_name="db_tool")
else:
chat_response = self.base_llm.process(tools=self.tools, system=SYSTEM.format(schema=self.schema))
logger.info(chat_response)
if not chat_response:
logger.info("Received empty chat response")
return chat_response
except APIConnectionError as e:
logger.info(f"API connection error: {e}")
raise
except RateLimitError as e:
logger.info(f"Rate limit error: {e}")
except APIStatusError as e:
logger.info(f"API status error: {e}")
logger.info(f"Status code: {e.status_code}, Response: {e.response}")
async def process_response(self, response):
from app.main import app
if not response:
raise Exception("Invalid response from llm")
logger.info(f"RESPONSE --->>>>>>\n{response}")
response_content = response['content']
if response['stop_reason'] == 'tool_use':
for single_response in response_content:
if single_response['type'] == 'text':
                    if 'Response:' in single_response['text']:
self.process_assistant_query(response_content)
feedback = "I can see Response field while calling the tool. If you want to take confirmation\
from user then take a break and take confirmation from user and then look for \
further value"
self.process_tool_result(tool_id=single_response['id'],
content=feedback,
is_error=True)
break
continue
self.process_assistant_query(response_content)
function_args, function_name = single_response['input'], single_response['name']
logger.info(f"function ARGS: {function_args}")
if 'db_tool' in function_name:
self.final_msg_formatting = MessageFormatting.DATABASE.value
question = function_args.get('query')
if not question:
feedback = 'query parameter cannot be empty'
self.process_tool_result(tool_id=single_response['id'],
content=feedback,
is_error=True)
break
self.tool_in_progress = function_args.get('Tool_for_which_collecting_parameter', None)
if not self.tool_in_progress:
feedback = 'Tool_for_which_collecting_parameter parameter cannot be empty'
self.process_tool_result(tool_id=single_response['id'],
content=feedback,
is_error=True)
break
if not self.previous_tool_in_progress:
self.previous_tool_in_progress = self.tool_in_progress
logger.info(f'{CYAN} tool in progress {self.tool_in_progress} {RESET}')
                    logger.info(f'{CYAN} previous tool in progress {self.previous_tool_in_progress} {RESET}')
                    # when the llm tries to switch to a tool whose parameters are already collected
if self.previous_tool_in_progress != self.tool_in_progress and self.tool_in_progress in self.tools_confirmed:
logger.info(f"{RED}-------------->>>>> inside the switching tool")
llm = AnthropicLLM()
llm.messages = self.base_llm.messages
content = [{
"type": "tool_result",
"tool_use_id": single_response['id'],
"content": 'lets wait and first review the query if we really need to switch tool or not'
}]
llm.messages.append({"role": 'user', "content": content})
logger.info(llm.messages)
tool_switcher = {
"name": "tool_selector",
"description": "This tool reviews users latest and current situation to find out if user is only interested in fetching some data or user wants to update any value. if user want to update any value then move to previous step else remain in same step and fetch data else False",
"input_schema": {
"type": "object",
"properties": {
"switch_tool": {
"type": "boolean",
"description": "Set this true if user wants to update any value of previous"
}
},
"required": [
"switch_tool"
]
},
}
tools = [tool_switcher]
tools.extend(self.tools)
chat_response = llm.process(tools=tools,
system=SYSTEM.format(schema=self.schema),
force_tool=True,
tool_name="tool_selector",
)
logger.info(chat_response)
self.switch_tool_id = chat_response['content'][0]['id']
self.process_assistant_query(chat_response['content'])
if chat_response['content'][0]['input']['switch_tool']:
self.switch_tool = True
logger.info("Switching tool is enabled.")
pass
else:
logger.info("Switching tool is disabled.")
                            feedback = f'''You did not need to switch Tool_for_which_collecting_parameter in db_tool \
                                because the user does not want to change anything. Just fetch the required information from the \
                                database using the previous Tool_for_which_collecting_parameter {self.previous_tool_in_progress}'''
self.process_tool_result(tool_id=self.switch_tool_id, content=feedback)
break
                    if self.previous_tool_in_progress != self.tool_in_progress and not self.switch_tool:
ready_to_move, feedback = self.tool_transition_status()
if not ready_to_move:
self.process_tool_result(tool_id=single_response['id'], content=feedback, is_error=True)
break
if any(keyword in question for keyword in ['MATCH', 'match', 'SELECT']):
feedback = f'The function argument you generated for db_tool was {question},\
but it appears to be in an incorrect format. To ensure proper functionality, please generate \
a natural language query for the db_tool instead of using Cypher query language. Respond with \
a valid JSON object containing a single key named "query" and its corresponding value as a \
string. The value should represent the natural language query you generated based on the \
given question.'
self.process_tool_result(tool_id=single_response['id'], content=feedback, is_error=True)
break
regions = ibm_region_mapper.keys()
question_lower_case = question.lower()
for region in regions:
region_lower = region.lower()
pattern = r'(?<![\w-]){}(?![\w-])'.format(re.escape(region_lower))
if re.search(pattern, question_lower_case):
question = re.sub(pattern, ibm_region_mapper[region], question, flags=re.IGNORECASE)
if function_name == 'db_tool_for_pagination':
logger.info("*"*200)
logger.info("RUNNING PAGINATION TOOL")
if self.last_tool == 'db_tool':
self.process_tool_result(
tool_id=single_response['id'],
content=f"""Internal Feedback: First show retrieved info to user then if user ask for further result only then retrieve that info""")
# Fetching new response after handling tool call
                            new_response = await self.get_response_from_llm()
return await self.process_response(new_response)
self.neo4j_client.llm_client.messages = list()
result, cypher = self.neo4j_client.run(
question=question, pagination=True, chat_history=self.llm_chat_history
)
else:
self.last_tool = 'db_tool'
self.neo4j_client.llm_client.messages = list()
result, cypher = self.neo4j_client.run(
question=question
)
if not self.switch_tool:
self.process_tool_result(
tool_id=single_response['id'],
content=f"cypher: {cypher} and result:{result}. Present this info in pretty way to user \
specially the result and don't show ids etc unless asked")
elif self.switch_tool:
self.switch_tool = False
self.process_tool_result(
tool_id=self.switch_tool_id,
content=f"cypher: {cypher} and result:{result}. Present this info in pretty way to user \
specially the result and don't show ids etc unless asked")
elif function_name in self.api_function_names:
logger.info(f'{CYAN} tool in progress {self.tool_in_progress} {RESET}')
                    logger.info(f'{CYAN} previous tool in progress {self.previous_tool_in_progress} {RESET}')
logger.info(function_name)
self.tool_in_progress = function_name
if not self.previous_tool_in_progress:
self.previous_tool_in_progress = self.tool_in_progress
if self.previous_tool_in_progress != self.tool_in_progress:
ready_to_move, feedback = self.tool_transition_status()
if not ready_to_move:
self.process_tool_result(tool_id=single_response['id'], content=feedback, is_error=True)
if ready_to_move:
# as of don't know when this can happen
raise Exception('tool called and its in ready to move status')
else:
self.tools_output[self.tool_in_progress] = function_args
logger.info(self.required_keys[self.previous_tool_in_progress])
logger.info(self.tools_output.get(self.previous_tool_in_progress))
self.payload = self.update_nested_dict(self.payload, function_args)
text = str(self.tools_output.get(self.tool_in_progress, {}))
schema_str = text.replace("[", "").replace("]", "")
schema_dict = ast.literal_eval(schema_str)
result, missing_keys, incorrect_ids = self.keys_exist_in_schema(
self.required_keys[self.tool_in_progress],
schema_dict)
duplicate, duplicate_keys = self.check_duplicate_ids(
self.tools_output.get(self.previous_tool_in_progress))
logger.info(
f"{RED} result: {result}, missing {missing_keys}, incorrect_ids {incorrect_ids},duplicate {duplicate} duplicate {duplicate} duplicate_ids {duplicate_keys} {RESET}")
if duplicate:
logger.info(f"{RED} inside correct")
self.process_tool_result(
tool_id=single_response['id'],
content=duplicate_ids_feedback.format(duplicate_keys=duplicate_keys,
tool=self.previous_tool_in_progress),
is_error=True)
elif missing_keys:
self.process_tool_result(
tool_id=single_response['id'],
content=missing_keys_feedback.format(missing_keys=missing_keys,
tool=self.previous_tool_in_progress),
is_error=True)
elif incorrect_ids:
incorrect_id_error = incorrect_keys_feedback.format(incorrect_ids=incorrect_ids,
tool=self.previous_tool_in_progress)
self.process_tool_result(single_response['id'], incorrect_id_error, is_error=True)
elif result:
self.process_tool_result(
tool_id=single_response['id'],
content=confirmation_feedback)
self.confirmation_stage = True
self.final_msg_formatting = MessageFormatting.PAYLOAD.value
else:
logger.info(f"Wrong function called : {function_name}")
# Fetching new response after handling tool call
new_response = await self.get_response_from_llm()
return await self.process_response(new_response)
elif response_content and response_content[0].get('text'):
logger.info("Processing user control response")
self.process_assistant_query(response_content)
user_message = response_content[0]['text']
if 'Thought:' in user_message and 'Response:' not in user_message:
logger.info("******** Response key is missing in response, Retrying to get response from LLM *********")
self.base_llm.add_message(role='user',
                                      content=f'Internal Feedback: You did not respond correctly and missed'
f' generating response in Response: key. Do not generate it as a tag'
f' like <Response> instead generate the response with key Response:')
response = await self.get_response_from_llm()
return await self.process_response(response)
elif "Response:" in user_message:
user_message = user_message.rsplit('Response:')[1]
logger.info(user_message)
return user_message
else:
return user_message
else:
raise Exception(f"Unexpected response format {response}")
def process_user_query(self, query):
self.base_llm.add_message(role="user", content=query)
def process_assistant_query(self, query):
self.base_llm.add_message(role="assistant", content=query)
def process_tool_result(self, tool_id, content, is_error=False):
content = [{
"type": "tool_result",
"tool_use_id": tool_id,
"content": content,
"is_error": is_error
}]
self.base_llm.add_message(role="user", content=content)
def tool_transition_status(self):
logger.info(self.required_keys[self.previous_tool_in_progress])
logger.info(self.tools_output.get(self.previous_tool_in_progress))
duplicate, duplicate_keys = self.check_duplicate_ids(self.tools_output.get(self.previous_tool_in_progress))
text = str(self.tools_output.get(self.previous_tool_in_progress, {}))
schema_str = text.replace("[", "").replace("]", "")
schema_dict = ast.literal_eval(schema_str)
result, missing_keys, incorrect_ids = self.keys_exist_in_schema(
self.required_keys[self.previous_tool_in_progress],
schema_dict)
logger.info(
f"{RED} result: {result}, missing {missing_keys}, incorrect_ids {incorrect_ids},duplicate {duplicate} duplicate {duplicate} duplicate_ids {duplicate_keys} {RESET}")
if not self.tools_output.get(self.previous_tool_in_progress):
feedback = f'Before moving to next tool. Call tool {self.previous_tool_in_progress} with \
fields so i can track what fields have collected so far'
self.tool_force = True
self.tool_in_progress = self.previous_tool_in_progress # shows not ready to move to next step
return False, feedback
elif duplicate:
feedback = duplicate_ids_feedback.format(duplicate_keys=duplicate_keys, tool=self.previous_tool_in_progress)
self.tool_in_progress = self.previous_tool_in_progress # shows not ready to move to next step
return False, feedback
elif missing_keys:
feedback = missing_keys_feedback.format(missing_keys=missing_keys, tool=self.previous_tool_in_progress)
self.tool_in_progress = self.previous_tool_in_progress
return False, feedback
elif incorrect_ids:
feedback = incorrect_keys_feedback.format(incorrect_ids=incorrect_ids, tool=self.previous_tool_in_progress)
self.tool_in_progress = self.previous_tool_in_progress # shows not ready to move to next step
return False, feedback
elif result and self.previous_tool_in_progress not in self.tools_confirmed:
feedback = confirmation_feedback
self.tool_in_progress = self.previous_tool_in_progress # shows not ready to move to next step
self.confirmation_stage = True
return False, feedback
return True, ''
def format_response(self, content):
try:
from app.whisper.llms.groq import GroqLLM
formatting_bot = GroqLLM()
if self.final_msg_formatting == MessageFormatting.DATABASE.value:
formatting_bot.add_message(role="system",
content=DB_RESULT_FORMAT_PROMPT.format(content=content))
elif self.final_msg_formatting == MessageFormatting.PAYLOAD.value:
formatting_bot.add_message(role="system",
content=PAYLOAD_RESULT_FORMAT_PROMPT)
formatting_bot.add_message(role=WHISPER_USER_ROLE, content=content)
streaming_obj = formatting_bot.process()
return streaming_obj
except Exception as e:
logger.error(f"Error during formatting response: {e}")
return content
|
CloudWhisperCustomBot | app/whisper/utils/action_engine/base.py | import copy
import json
import re
import uuid
import requests
from anthropic import APIConnectionError, RateLimitError, APIStatusError
from loguru import logger
from app.web.common.utils import update_activity_tracking
from app.whisper.consts import WHISPER_USER_ROLE, WHISPER_ASSISTANT_ROLE
from app.whisper.llms.anthropic import AnthropicLLM
from app.whisper.utils.action_engine.consts import ibm_region_mapper, MessageFormatting
from app.whisper.utils.action_engine.prompt import SYSTEM
from app.whisper.utils.neo4j.client import Neo4j
from app.whisper.utils.pagination_utils import generate_count_query
from app.whisper.utils.prompt import DB_RESULT_FORMAT_PROMPT, PAYLOAD_RESULT_FORMAT_PROMPT
class ActionPhaseClaude():
def __init__(self, intent: str, chat_history: list, bearer: str, user_id: str, metadata_dict, cloud_id):
self.base_llm = AnthropicLLM()
self.base_llm.messages = chat_history # it adds messages in self.base_llm
self.endpoint = intent[-1]['method']['endpoint']
self.api_function_name = intent[-1]['method']['tool']['name']
self.api_function_schema = intent[-1]['method']['tool']['input_schema']
self.bearer = bearer
self.tools = self._get_tool_from_intent(intent)
self.last_tool = None
self.user_id = user_id
self.cloud_id = cloud_id
self.llm_chat_history = ''
self.final_msg_formatting = MessageFormatting.DISABLED.value
for chat_ in self.base_llm.messages[-10:]:
logger.info(chat_) # don't add user latest query in chat history
self.llm_chat_history += f"<{chat_['role'].lower()}>: {json.dumps(chat_['content'])}</{chat_['role'].lower()}>\n"
self.response_id = metadata_dict.get('response_id', '')
self.confirm_execution = metadata_dict.get('confirm_execution')
self.confirm_in_tool = metadata_dict.get('confirm_in_tool')
self.func_args = metadata_dict.get('payload', {})
self.metadata_dict = metadata_dict
from app.main import app
self.neo4j_client = Neo4j(db_session=app.state.neo4j_session, user_id=self.user_id, cloud_id=self.cloud_id,
vpc=True)
self.schema = self.neo4j_client.schema
def _get_tool_from_intent(self, intent):
tools = [intent[-1]['method']['tool']] # Assumes intent[-1] is the highest priority
db_tool = {
"name": "db_tool",
"description": "This is a helper tool. It assists in finding and validating information from databases. "
"Use this as much as possible to fill out parameters of the tool '{function_name}'."
" Find out information from Knowledge graph database.",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'What are the names of all the companies'"}
}
}
}
pagination_db_tool = {
"name": "db_tool_for_pagination",
"description": """This is a helper tool. It helps to fetch next page of a requested resources. Analyze the conversation history carefully before generating a query. Consider the following points:
1. Pagination: If this is a follow-up request, determine which page of results the user needs next.
2. Specificity: Take a close look at chat_history and create a precise query that reflects the user's current request by determining how many records have already been shown so they can be skipped. Avoid generic 'show all' queries.
3. Context: Incorporate any relevant context from previous interactions.
4. Limits: Remember that the db_tool can't fetch all records at once. Specify a reasonable LIMIT (e.g., 20 items per page)
5. Request for listing all items of a resource: Review your history; if the user is requesting all items of a resource whose count is greater than 25, then avoid creating such a query for db_tool.""",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'Fetch the next 20 [resource_type] records skipping the numbers of records which are already shown"}
}
}
}
description = db_tool["description"]
updated_description = description.format(function_name=self.api_function_name)
db_tool["description"] = updated_description
tools.append(db_tool)
tools.append(pagination_db_tool)
return tools
async def start(self, query: str, chat_id=None, action_id=None):
logger.info(f"********{chat_id}****************8")
logger.info(f"********{action_id}****************8")
response = await self.get_response_from_llm(query, chat_id=chat_id, action_id=action_id)
content = await self.process_response(response=response, chat_id=chat_id, action_id=action_id)
if self.final_msg_formatting == MessageFormatting.DATABASE.value:
content = self.format_response(content)
return content
async def get_response_from_llm(self, query='', chat_id=None, action_id=None):
if query:
if self.metadata_dict.get('confirm_execution'):
self.base_llm.add_message(role=WHISPER_USER_ROLE, content=query)
confirmation_tool = {
"name": "confirmation_tool",
"description": "This tool reviews users latest reply to confirmation of the values selected by a tool that has recently completed the payload. If the user confirms or agrees with the choices in the summary displayed, the Confirmation is true, but if user doesn't approve then summary then Confirmation is false."
"else False",
"input_schema": {
"type": "object",
"properties": {
"Confirmation": {
"type": "boolean",
"description": "Confirmation flag that is only True when user confirms the details and is ready to move forward. Analyze user last response. Don't get hallucinated by history"
}
},
"required": [
"Confirmation"
]
}
}
                confirmation_tools = self.tools
                if confirmation_tool['name'] not in [tool['name'] for tool in confirmation_tools]:
                    confirmation_tools.append(confirmation_tool)
logger.info(self.base_llm.messages)
chat_response = self.base_llm.process(tools=confirmation_tools,
system=SYSTEM.format(schema=self.schema),
force_tool=True,
tool_name="confirmation_tool")
confirmation = chat_response['content'][-1]['input']["Confirmation"]
self.metadata_dict['confirm_execution'] = False
logger.info(confirmation)
if confirmation:
for message in self.base_llm.messages[::-1]: # iterate list in reverse
logger.info(message)
if isinstance(message['content'], list):
for message_content in message['content']:
logger.info(message_content)
if message_content.get('type') and message_content['type'] == 'tool_use':
self.base_llm.messages[-1]['content'] = query
try:
logger.info(self.base_llm.messages)
chat_response = self.base_llm.process(tools=self.tools,
system=SYSTEM.format(schema=self.schema),
force_tool=True,
tool_name=message_content['name'])
logger.info(chat_response)
self.process_assistant_query(chat_response['content'])
for single_response in chat_response['content']:
if single_response['type'] == 'tool_use':
tool_id = single_response['id']
self.func_args = message_content['input']
if 'scheduled_policy' in self.func_args:
id = uuid.uuid4().hex
self.func_args['scheduled_policy']['id'] = id
self.func_args['scheduled_policy'].pop('backup_frequency')
self.func_args['scheduled_policy'].pop('backup_time')
self.func_args['scheduled_policy'].pop('backup_day')
call_response_for_llm = await self.api_call(chat_id, action_id)
self.process_tool_result(tool_id, call_response_for_llm)
chat_response = self.base_llm.process(tools=self.tools,
system=SYSTEM.format(schema=self.schema))
return chat_response
except APIConnectionError as e:
logger.info(f"API connection error: {e}")
raise
except RateLimitError as e:
logger.info(f"Rate limit error: {e}")
except APIStatusError as e:
logger.info(f"API status error: {e}")
logger.info(f"Status code: {e.status_code}, Response: {e.response}")
else:
if len(self.base_llm.messages) == 0:
query = f"{query} \n\nInternal Feedback: IBM Cloud id {self.cloud_id} \n Take a break and then start"
self.base_llm.add_message(role='user', content=query)
try:
system = SYSTEM.format(schema=self.schema)
if len(self.base_llm.messages) == 1:
chat_response = self.base_llm.process(tools=self.tools, system=system, prompt_caching=True,
force_tool=True, tool_name='db_tool')
else:
chat_response = self.base_llm.process(tools=self.tools, system=system, prompt_caching=True)
logger.info(chat_response)
if not chat_response:
logger.info("Received empty chat response")
return chat_response
except APIConnectionError as e:
logger.info(f"API connection error: {e}")
raise
except RateLimitError as e:
logger.info(f"Rate limit error: {e}")
except APIStatusError as e:
logger.info(f"API status error: {e}")
logger.info(f"Status code: {e.status_code}, Response: {e.response}")
def extract_keys(self, schema, parent_key=None):
required_dict = {}
if isinstance(schema, dict):
for key, value in schema.items():
# Check if the current key is 'required' and the parent is not 'items'
if key == 'required' and isinstance(value, list) and parent_key != 'items':
if parent_key:
if parent_key in required_dict:
required_dict[parent_key].extend(value)
else:
required_dict[parent_key] = value
else:
required_dict[key] = value
# Recurse into nested dictionaries
if isinstance(value, dict):
nested_required_values = self.extract_keys(value, key)
for nested_key, nested_value in nested_required_values.items():
if nested_key in required_dict:
required_dict[nested_key].extend(nested_value)
else:
required_dict[nested_key] = nested_value
return required_dict
def keys_exist_in_schema(self, required_keys_dict, schema):
def check_key_in_schema(schema, key, parent_key=None):
if isinstance(schema, dict):
if parent_key:
if parent_key in schema and isinstance(schema[parent_key], dict) and key in schema[parent_key]:
if key == 'id' or 'id' in key:
id_value = schema[parent_key][key]
if not re.match(r'^[0-9a-f]{32}$', id_value):
return False, f"{parent_key}.{key}"
return True, None
elif key in schema:
if key == 'id' or 'id' in key:
id_value = schema[key]
if not re.match(r'^[0-9a-f]{32}$', id_value):
return False, key
return True, None
for sub_key, sub_value in schema.items():
if isinstance(sub_value, dict):
result, incorrect_key = check_key_in_schema(sub_value, key, parent_key)
if result:
return True, None
elif incorrect_key:
return False, incorrect_key
return False, None
missing_keys = []
incorrect_id_keys = []
for parent_key, keys in required_keys_dict.items():
if parent_key == 'required':
for key in keys:
if key not in schema:
missing_keys.append(key)
else:
for key in keys:
result, incorrect_key = check_key_in_schema(schema, key, parent_key)
if not result:
if incorrect_key:
incorrect_id_keys.append(incorrect_key)
else:
missing_keys.append(f"{parent_key}.{key}" if parent_key else key)
if missing_keys or incorrect_id_keys:
return False, missing_keys, incorrect_id_keys
return True, None, None
async def api_call(self, chat_id, action_id):
args = json.loads(json.dumps(self.func_args))
logger.debug(f"Finalized ARGS created by LLM: {json.dumps(args, indent=2)}")
if args.get('path_params'):
for key, val in args["path_params"].items():
self.endpoint = self.endpoint.replace('{' + key + '}', val)
temp = dict()
for key, val in args.items():
if key != 'path_params':
temp[key] = val
args = temp
logger.debug("Preparing to make POST request to API")
logger.debug(f"Endpoint: {self.endpoint}")
logger.debug(f"Request body: {json.dumps(args, indent=2)}")
call_response = requests.post(
self.endpoint,
json=args,
headers={"Authorization": self.bearer}
)
response_data = call_response.json()
response_data.pop('fe_request_data', None)
response_data.pop('associated_roots', None)
call_response_for_llm = (
f"Status-code: {call_response.status_code}\n"
f"Response: {json.dumps(response_data, indent=2)} \ninform"
f" the user that you will receive an email notification once this action is completed."
)
if call_response.status_code in [202, 200]:
logger.info("************* Updating Activity Tracking ******************")
await update_activity_tracking(call_response.text, chat_id, action_id, self.user_id)
logger.debug('#' * 20)
logger.debug(f"API call response: {call_response_for_llm}")
logger.debug('#' * 20)
return call_response_for_llm
async def process_response(self, response, chat_id, action_id):
if not response:
raise Exception("Invalid response from llm")
logger.info(response)
total_count = 0
remaining_count = 0
response_content = response['content']
if response['stop_reason'] == 'tool_use':
for single_response in response_content:
if single_response['type'] == 'text':
if 'Confirmation Flag: True' in single_response['text']:
self.confirm_in_tool = True
self.process_assistant_query(response_content)
if single_response['type'] != 'tool_use':
continue
if self.confirm_in_tool:
self.confirm_in_tool = False
call_response_for_llm = await self.api_call(chat_id=chat_id, action_id=action_id)
self.process_tool_result(single_response['id'], call_response_for_llm)
new_response = await self.get_response_from_llm(chat_id=chat_id, action_id=action_id)
return await self.process_response(new_response, chat_id, action_id)
self.process_assistant_query(response_content)
function_args, function_name = single_response['input'], single_response['name']
logger.info(f"function ARGS: {function_args}")
if 'db_tool' in function_name:
self.final_msg_formatting = MessageFormatting.DATABASE.value
question = function_args['query']
if any(keyword in question for keyword in ['MATCH', 'match', 'SELECT']):
feedback = f'Internal feedback: The function argument you generated for db_tool was {question},\
but it appears to be in an incorrect format. To ensure proper functionality, please generate \
a natural language query for the db_tool instead of using Cypher query language. Respond with \
a valid JSON object containing a single key named "query" and its corresponding value as a \
string. The value should represent the natural language query you generated based on the \
given question.'
self.process_tool_result(tool_id=single_response['id'], content=feedback)
break
regions = ibm_region_mapper.keys()
question_lower_case = question.lower()
for region in regions:
region_lower = region.lower()
pattern = r'(?<![\w-]){}(?![\w-])'.format(re.escape(region_lower))
if re.search(pattern, question_lower_case):
question = re.sub(pattern, ibm_region_mapper[region], question, flags=re.IGNORECASE)
logger.info(self.cloud_id)
if function_name == 'db_tool_for_pagination':
if self.last_tool == 'db_tool':
self.process_tool_result(
tool_id=single_response['id'],
content=f"""Internal Feedback: First show retrieved info to user then if user ask for further result only then retrieve that info""")
# Fetching new response after handling tool call
new_response = await self.get_response_from_llm(chat_id=chat_id, action_id=action_id)
return await self.process_response(new_response, chat_id, action_id)
self.neo4j_client.llm_client.messages = list()
self.neo4j_client.retry = 0
result, cypher = self.neo4j_client.run(
question=question, pagination=True, chat_history=self.llm_chat_history
)
else:
                        self.last_tool = 'db_tool'
self.neo4j_client.llm_client.messages = list()
result, cypher = self.neo4j_client.run(
question=question
)
if cypher and cypher != 'empty':
count_query = generate_count_query(cypher)
self.neo4j_client.llm_client.messages = list()
total_count, _ = self.neo4j_client.run(
cypher=count_query
)
logger.info(total_count)
if total_count:
total_count = total_count[1][0]
if result and isinstance(total_count, int):
remaining_count = total_count - (len(result) - 1)
logger.info(
"================================ Total Count in 1st Go =========================")
logger.info(total_count)
logger.info(remaining_count)
logger.info(
"================================ Total Count in 1st Go =========================")
if cypher == 'empty':
tool_result = "db_tool returned empty result. Please plan next step accordingly or try another way to query"
elif total_count and total_count <= 20:
tool_result = f"""Result returned by db_tool ios enclosed in double quotes \"cypher: {cypher} \\n
result: {result} \\n
total_count: {total_count} \\n
Present this info to user. Don't hallucinate"""
elif total_count:
tool_result = f"""Result returned by db_tool is enclosed in double quotes:
"cypher: {cypher}
result: {result}
total_count: {total_count}"
Present this information to the user, keeping pagination-related guidelines in mind. Consider \
how to present information when more records exist in the database without forgetting your main\
job. Don't hallucinate"""
else:
tool_result = f"""Result returned by db_tool is enclosed in double quotes:
"cypher: {cypher}
result: {result}
total_count: couldn't find tool result, figure out yourself"
Present this information to the user, keeping pagination-related guidelines in mind. Consider \
how to present information when more records exist in the database without forgetting your main\
job. Don't hallucinate"""
self.process_tool_result(
tool_id=single_response['id'],
content=tool_result)
elif function_name == self.api_function_name:
try:
api_function_schema_copy = copy.deepcopy(self.api_function_schema)
schema_required_keys = self.extract_keys(api_function_schema_copy)
result, missing_keys, incorrect_ids = self.keys_exist_in_schema(schema_required_keys,
function_args)
if result:
logger.info("All keys exist in the schema.")
else:
if missing_keys:
logger.info(f"The following keys are missing: {missing_keys}")
missing_keys_error = f'''The following keys are missing from the provided payload: {missing_keys}. \
Please ensure that you populate these keys with the required values before invoking this function. \
Failing to provide the necessary data may result in unexpected behavior or errors during execution.\
Review the provided input schema of this function and make sure you follow correct format of the payload \
along with all required keys. if there is any extra key present, remove it.
Once you have filled in the missing information, you can proceed with calling the function as intended.'''
self.process_tool_result(single_response['id'], missing_keys_error)
break
elif incorrect_ids:
logger.info(f"The following keys have incorrect 'id' values: {incorrect_ids}")
incorrect_id_error = f'''The following keys have incorrect 'id' values in the provided payload: {incorrect_ids}. \
Please ensure that you provide valid 'id' values for these keys before invoking this function.
It seems like you might have accidentally added the name of the resource instead of its corresponding 'id'. \
If you haven't fetched the 'id' of this resource before from db_tool, make sure to retrieve it first.
Incorrect 'id' values may lead to errors or unexpected behavior during execution.
Once you have corrected the 'id' values, you can proceed with calling the function as intended.'''
self.process_tool_result(single_response['id'], incorrect_id_error)
break
self.metadata_dict['confirm_execution'] = True
self.metadata_dict['payload'] = function_args
self.metadata_dict['response_id'] = single_response['id']
function_args_copy = function_args.copy()
logger.info(function_args_copy)
if function_args_copy.get('scheduled_policy'):
function_args_copy['scheduled_policy'].pop('scheduled_cron_pattern')
logger.info(function_args_copy)
function_args_str = str(function_args_copy)
logger.info(function_args_str)
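# IBM resource IDs are 32-character hex strings; resolve each one to its display name in the graph so the confirmation message shown to the user is human readable.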
pattern = r"[a-f0-9]{32}"
ids = re.findall(pattern, function_args_str)
for id in ids:
cypher = f'''MATCH(n) WHERE n.id = '{id}' RETURN n.name'''
self.neo4j_client.llm_client.messages = list()
result = self.neo4j_client.query_database(
cypher
)
logger.info(result)
if result and isinstance(result, list) and len(result) > 1:
name = result[1][0] if isinstance(result[1], list) and len(result[1]) > 0 else str(
result[1])
logger.info(function_args_str)
function_args_str = function_args_str.replace(id, name)
function_args_str = function_args_str.replace('id', 'name')
logger.info(function_args_str)
confirmation_llm = AnthropicLLM()
confirmation_llm.add_message(role=WHISPER_USER_ROLE, content=f'''here is the payload enclosed in double\
quotes\n"{function_args_str}" your job is to present this to the \
user in a properly formatted, human-readable way (not resembling a raw payload) and \
ask for confirmation that all the \
information is correct. Start your response directly, without preambles like "here is your information". Act as if you are asking for confirmation before performing the action.''')
message_str = confirmation_llm.process()
self.process_tool_result(tool_id=single_response['id'],
content=message_str)
self.final_msg_formatting = MessageFormatting.PAYLOAD.value
return message_str
except APIConnectionError as e:
logger.info(f"API connection error: {e}")
raise
except RateLimitError as e:
logger.info(f"Rate limit error: {e}")
except APIStatusError as e:
logger.info(f"API status error: {e}")
logger.info(f"Status code: {e.status_code}, Response: {e.response}")
else:
logger.info(f"Wrong function called : {function_name}")
# Fetching new response after handling tool call
new_response = await self.get_response_from_llm(chat_id=chat_id, action_id=action_id)
return await self.process_response(new_response, chat_id, action_id)
elif response_content and response_content[0].get('text'):
logger.info("Processing user control response")
self.process_assistant_query(response_content)
user_message = response_content[0]['text']
if ('Thought:' in user_message or 'Hallucination:' in user_message) and 'Response:' not in user_message:
logger.info("******** Response key is missing in response, Retrying to get response from LLM *********")
self.base_llm.add_message(role='user',
content=f'Internal Feedback: You did not respond correctly and missed'
f' generating the response under the Response: key. Do not generate it as a tag'
f' like <Response>; instead generate the response with the key Response:')
retry_response = await self.get_response_from_llm(chat_id=chat_id, action_id=action_id,
db_session=db_session)
logger.info(retry_response)
return await self.process_response(retry_response, chat_id, action_id)
elif "Response:" in user_message:
user_message = user_message.rsplit('Response:')[1]
logger.info(user_message)
return user_message
else:
return user_message
else:
raise Exception(f"Unexpected response format {response}")
def process_user_query(self, query):
self.base_llm.add_message(role=WHISPER_USER_ROLE, content=query)
def process_assistant_query(self, query):
self.base_llm.add_message(role=WHISPER_ASSISTANT_ROLE, content=query)
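# Wrap a tool result in Anthropic's tool_result content block and append it to the conversation as a user message.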
def process_tool_result(self, tool_id, content, is_error=False):
content = [{
"type": "tool_result",
"tool_use_id": tool_id,
"content": content,
"is_error": is_error
}]
self.base_llm.add_message(role=WHISPER_USER_ROLE, content=content)
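# Route the final answer through a lightweight Groq formatter: database results and payload confirmations use different formatting prompts, and the resulting streaming object is returned; on failure the raw content is returned unchanged.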
def format_response(self, content):
try:
from app.whisper.llms.groq import GroqLLM
formatting_bot = GroqLLM()
if self.final_msg_formatting == MessageFormatting.DATABASE.value:
formatting_bot.add_message(role="system",
content=DB_RESULT_FORMAT_PROMPT.format(content=content))
elif self.final_msg_formatting == MessageFormatting.PAYLOAD.value:
formatting_bot.add_message(role="system",
content=PAYLOAD_RESULT_FORMAT_PROMPT.format(content=content))
streaming_obj = formatting_bot.process()
return streaming_obj
except Exception as e:
logger.error(f"Error during formatting response: {e}")
return content
|
CloudWhisperCustomBot | app/whisper/utils/qna_bot/consts.py | ibm_region_mapper = {
"dallas": "us-south",
"dallas tx": "us-south",
"dallas texas": "us-south",
"sydney": "au-syd",
"sydney au": "au-syd",
"sydney australia": "au-syd",
"london": "eu-gb",
"london uk": "eu-gb",
"london united kingdom": "eu-gb",
"frankfurt": "eu-de",
"frankfurt de": "eu-de",
"frankfurt germany": "eu-de",
"osaka": "jp-osa",
"osaka jp": "jp-osa",
"osaka japan": "jp-osa",
"tokyo": "jp-tok",
"tokyo jp": "jp-tok",
"tokyo japan": "jp-tok",
"sao paulo": "br-sao",
"sao paulo br": "br-sao",
"sao paulo brazil": "br-sao",
"toronto": "ca-tor",
"toronto ca": "ca-tor",
"toronto canada": "ca-tor",
"madrid": "eu-es",
"madrid es": "eu-es",
"madrid spain": "eu-es",
"washington dc": "us-east",
"washington": "us-east",
"dc": "us-east",
"d.c.": "us-east",
"dc.": "us-east",
"washington d.c.": "us-east",
"washington d.c": "us-east",
"washington usa": "us-east",
"washington us": "us-east",
"washington district of columbia": "us-east"
}
|
CloudWhisperCustomBot | app/whisper/utils/qna_bot/__init__.py | from app.whisper.utils.action_engine.base import ActionPhaseClaude
__all__ = ['ActionPhaseClaude'] |
CloudWhisperCustomBot | app/whisper/utils/qna_bot/base.py | import mailchimp_transactional as MailchimpTransactional
from anthropic import APIConnectionError, RateLimitError, APIStatusError
from loguru import logger
from mailchimp_transactional.api_client import ApiClientError
from app.core.config import settings
from app.whisper.llms.anthropic import AnthropicLLM
from app.whisper.utils.qna_bot.prompt import SYSTEM
class QnABot():
def __init__(self, chat_history: list, user_name: str):
self.base_llm = AnthropicLLM()
self.base_llm.messages = self.base_llm.format_chat_history(chat_history, update_messages=True)
self.tools = self._get_tool_from_intent()
self.user_name = user_name
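# Build the two tool schemas exposed to the LLM: a general question_answer_tool and a schedule_a_call_tool that collects a request type and message.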
def _get_tool_from_intent(self):
question_answer_tool = {
"name": "question_answer_tool",
"description": "Utilize this tool to respond to general questions related to Wanclouds' services and offerings, including the VPC+ product, cloud services, and migration concepts.",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Questions about product offerings, services, migration processes, or any general inquiries like 'Tell me about your offerings'"
}
}
}
}
schedule_a_call_tool = {
"name": "schedule_a_call_tool",
"description": "This tool assists users in scheduling a call with Wanclouds by submitting their request. Activate this tool only when the user explicitly expresses interest in making a request or scheduling a call. Ensure to collect all necessary information from the user before invoking the tool.",
"input_schema": {
"type": "object",
"properties": {
"request_type": {
"type": "string",
"description": "The type of request the user wants to make. Prompt the user to choose one from these options: [IBM Cloud Datacenter Migration, Migration to IBM Cloud VPC, Implement IBM IKS and Red Hat OpenShift DRaaS, Setup Disaster Recovery, Upgrades for Windows/Linux OS or Databases, Multi-Cloud Managed Service, Other Requests]"
},
"message": {
"type": "string",
"description": "Additional information or details about the user's request. Ask the user to provide this."
}
},
"required": ["request_type", "message"]
}
}
return [question_answer_tool, schedule_a_call_tool]
async def start(self, query: str):
response = self.get_response_from_llm(query)
if not response:
logger.error("LLM returned None response.")
raise Exception("LLM returned a None response.")
if 'content' not in response:
logger.error(f"Invalid response structure: {response}")
raise Exception("Invalid response from LLM")
content = await self.process_response(response)
return content
def get_response_from_llm(self, query=''):
""" Get the initial response from LLM """
if query:
self.process_user_query(query=query)
try:
chat_response = self.base_llm.process(tools=self.tools, system=SYSTEM)
logger.info("*************get_response_from_llm*******************")
logger.info(f"Got response: {chat_response}")
logger.info("*************get_response_from_llm*******************")
if not chat_response:
logger.info("Received empty chat response")
return None
return chat_response
except (APIConnectionError, RateLimitError, APIStatusError) as e:
logger.error(f"API Error while fetching response from LLM: {e}")
raise
async def process_response(self, response):
try:
response_content = response['content']
except KeyError:
logger.error(f"Missing 'content' key in the response: {response}")
raise Exception("Invalid response structure from LLM. Missing 'content' key.")
if response['stop_reason'] == 'tool_use':
logger.info("LLM wants to use a tool")
for single_response in response_content:
if single_response['type'] != 'tool_use':
continue
function_args, function_name = single_response['input'], single_response['name']
logger.info(f"Function Name: {function_name}")
logger.info(f"Function ARGS: {function_args}")
if function_name == 'question_answer_tool':
await self.handle_question_answer_tool(response_content, function_args, single_response)
elif function_name == 'schedule_a_call_tool' or function_name == 'confirmation_tool':
logger.info(f"Function Name: {function_name} handle_schedule_tool being used")
await self.handle_schedule_tool(response_content, function_name, function_args, single_response)
else:
logger.warning(f"Unknown function: {function_name}")
# Get a new response after handling all tools
new_response = self.get_response_from_llm()
if not new_response.get('content'):
self.process_assistant_query('None')
self.process_user_query("you sent empty response, please generate a response without apologizing as this is internal feedback")
new_response = self.get_response_from_llm()
return await self.process_response(new_response)
elif response_content and response_content[0].get('text'):
logger.info("Processing user control response")
self.process_assistant_query(response_content)
user_message = response_content[0]['text']
if 'Thought:' in user_message and 'Response:' not in user_message:
logger.info("******** Response key is missing in response, Retrying to get response from LLM *********")
self.base_llm.add_message(role='user',
content=f'Internal Feedback: You did not respond correctly and missed'
f' generating the response under the Response: key. Do not generate it as a tag'
f' like <Response>; instead generate the response with the key Response:')
response = self.get_response_from_llm()
return await self.process_response(response)
elif "Response:" in user_message:
user_message = user_message.rsplit('Response:')[1]
return user_message
else:
return user_message
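# Forward the query to the base bot, accumulate the streamed "data:" chunks into one answer, and feed it back as a tool result along with formatting guidance for the reply.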
async def handle_question_answer_tool(self, response_content, function_args, single_response):
# This is working fine dont change
self.process_assistant_query(response_content)
from app.web.common.chats_websockets_utils import get_base_bot_response
payload = {"question": function_args['query'], "messages": []}
assistant_message = ""
try:
async for chunk in get_base_bot_response(payload):
response_lines = chunk.splitlines()
for line in response_lines:
if line.startswith("data:"):
chunk = line.split(": ")[1]
assistant_message += chunk
if not assistant_message.strip():
raise ValueError("Received an empty response from the assistant")
logger.info(f"Response from base bot : {assistant_message}")
except Exception as e:
logger.error(f"An error occurred in execute_qna: {str(e)}")
raise e
if len(self.base_llm.messages) == 2:
feedback = f"""Take a moment to reflect. Then, as a Wanclouds support agent:
1. Start with a greeting.
2. Craft a professional response using this information with proper formatting:
"{assistant_message.strip()}"
3. Don't mention any internal tools or processes.
4. Invite follow-up questions.
5. Don't mention multi_cloud, across clouds, or Multi-Cloud; instead mention only IBM Cloud
Ensure your response is natural and addresses the user's query effectively. Response:"""
else:
feedback = f"""Take a moment to reflect. Then, as a Wanclouds support agent:
1. Craft a professional response using this information with proper formatting:
"{assistant_message.strip()}"
2. Don't mention any internal tools or processes.
3. Invite follow-up questions.
4. Don't mention multi_cloud, across clouds, or Multi-Cloud; instead mention only IBM Cloud
Ensure your response is natural and addresses the user's query effectively. Response:"""
self.process_tool_result(
tool_id=single_response['id'],
content=feedback.strip())
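# Force the confirmation_tool to decide whether the user has explicitly confirmed; only then is the request emailed, otherwise the model is prompted to keep gathering or confirming details.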
async def handle_schedule_tool(self, response_content, function_name, function_args, single_response):
confirmation_tool = {
"name": "confirmation_tool",
"description": "This tool Analyze the chat history to determine if user confirmation was requested and given before scheduling the call. Set confirmation to true if user agreed, false if confirmation wasn't asked or user disagreed.",
"input_schema": {
"type": "object",
"properties": {
"Confirmation": {
"type": "boolean",
"description": "Confirmation flag that is only True when user confirms the details and is ready to move forward. Analyze user last response. Don't get hallucinated by history"}
},
"required": [
"Confirmation"
]
}
}
tools = [tool for tool in self.tools]
tools.append(confirmation_tool)
logger.info("Confirmation tool added to tool list")
output = self.base_llm.process(tools=tools, system=SYSTEM, force_tool=True, tool_name='confirmation_tool')
logger.info(f"confirmation tool output: {output}")
confirmation = output['content'][-1]['input']['Confirmation']
if confirmation:
logger.info("confirmation is true")
self.process_assistant_query(response_content)
await self.send_email(function_name, function_args, single_response)
else:
logger.info("confirmation is false")
logger.info(response_content)
logger.info(self.base_llm.messages)
if len(self.base_llm.messages) == 1:
feedback = "Take a deep breath and analyze current situation. Add greetings as this is first message of user and generate a professional message for user"
else:
feedback = "Take a deep breath and analyze current situation. And generate a professional message for user"
self.process_assistant_query(response_content)
logger.info(self.base_llm.messages)
self.process_tool_result(tool_id=single_response['id'], content=feedback, is_error=False)
# Email the collected request details to the Wanclouds engineering inbox via Mailchimp Transactional.
async def send_email(self, function_name, function_args, single_response):
""" Send email based on the tool's input """
mailchimp = MailchimpTransactional.Client(settings.email.MANDRILL_API_KEY)
text = (f'User Name: {self.user_name}\n'
f'Request type: {function_args["request_type"]}\n'
f'Message: {function_args["message"]}')
message = {
"from_email": settings.email.MAIL_USERNAME,
"subject": f'Request: {function_args.get("request_type", "IBM Migration")}',
"text": text,
"to": [{'email': 'engg@wanclouds.net'}]
}
try:
response = mailchimp.messages.send({"message": message})
logger.info('Email sent successfully: {}'.format(response))
self.process_tool_result(single_response['id'], "Email sent successfully. Check your inbox.")
except ApiClientError as error:
logger.error(f'An error occurred while sending the email: {error.text}')
def process_user_query(self, query):
self.base_llm.add_message(role="user", content=query)
def process_assistant_query(self, query):
self.base_llm.add_message(role="assistant", content=query)
def process_tool_result(self, tool_id, content, is_error=False):
content = [{
"type": "tool_result",
"tool_use_id": tool_id,
"content": content,
"is_error": is_error
}]
self.base_llm.add_message(role="user", content=content)
|
CloudWhisperCustomBot | app/whisper/utils/migration_action_engine/consts.py | ibm_region_mapper = {
"dallas": "us-south",
"dallas tx": "us-south",
"dallas texas": "us-south",
"sydney": "au-syd",
"sydney au": "au-syd",
"sydney australia": "au-syd",
"london": "eu-gb",
"london uk": "eu-gb",
"london united kingdom": "eu-gb",
"frankfurt": "eu-de",
"frankfurt de": "eu-de",
"frankfurt germany": "eu-de",
"osaka": "jp-osa",
"osaka jp": "jp-osa",
"osaka japan": "jp-osa",
"tokyo": "jp-tok",
"tokyo jp": "jp-tok",
"tokyo japan": "jp-tok",
"sao paulo": "br-sao",
"sao paulo br": "br-sao",
"sao paulo brazil": "br-sao",
"toronto": "ca-tor",
"toronto ca": "ca-tor",
"toronto canada": "ca-tor",
"madrid": "eu-es",
"madrid es": "eu-es",
"madrid spain": "eu-es",
"washington dc": "us-east",
"washington": "us-east",
"dc": "us-east",
"d.c.": "us-east",
"dc.": "us-east",
"washington d.c.": "us-east",
"washington d.c": "us-east",
"washington usa": "us-east",
"washington us": "us-east",
"washington district of columbia": "us-east"
}
migrations_faq = """Below are frequently asked questions (FAQs) about migration. This information is provided for reference only, to help answer potential user queries. Do not use this to fill out any user requirements or assume any information about the user's specific situation.
<migration_guide>
1. What operating systems are supported for migration by Wanclouds?
Wanclouds supports the following operating systems for migration:
- Windows: 2016 or later editions
- Ubuntu: 20 or later editions
- Debian: 11 or later editions
- Red Hat Enterprise Linux (RHEL): 8 or later editions
- CentOS: 8 or later for Bare Metal, 9 for VSI
For operating systems that have reached End of Life (EOL), such as Windows 2012, Ubuntu 14/16/18, Debian 8/9/10, Red Hat 7, and CentOS 7 for Bare Metal, custom migration approaches may be needed, which could involve risks due to incompatibility with new hardware.
2. What are the prerequisites and preparations needed before starting the migration?
Before initiating the migration, the following prerequisites must be completed:
- Access to Current and Target Environments: Wanclouds needs access to your current environment. If the target infrastructure is already provisioned, access will be required for that as well.
- Preparation of Target Infrastructure: Ensure that the target infrastructure is ready, with connectivity between the source and target environments established, including necessary network configurations.
- Temporary Resources for Migration: Additional temporary resources, such as extra disk space or increased link speed, may be required for the migration process.
- Operating System Considerations: If your environment runs on an EOL operating system, an upgrade is necessary before migration, as Wanclouds does not support EOL operating systems.
- Migration Window: Provide a migration window at least one week in advance, including the preferred date and time.
3. How will Wanclouds access my current environment for migration?
To assess your environment before migration, Wanclouds will:
- Grant Access: You will need to add a Wanclouds team member's ID to your IBM Cloud account by sending an invitation for IBM Cloud access.
- Evaluate the Environment: With read access, Wanclouds will evaluate your current and target infrastructure, including OS versions, disk sizes, link speeds, applications, and any additional components or services.
- Full Access for Migration: For the actual migration, full access to your account will be required.
4. How does Wanclouds handle compatibility issues during migration?
Wanclouds will:
- Evaluate Compatibility: Assess compatibility between the current and target environments, including applications, OS versions, and network settings.
- Identify Conflicts: Identify potential conflicts and provide solutions or workarounds to address them.
5. Can I retain my current IP addresses during migration?
- IBM-Provided IPs: Cannot be retained due to unique subnets and IPs for each data center.
- Customer-Managed IPs: Can be retained with some network-level changes.
6. What is the first step in initiating the migration process with Wanclouds?
The first step involves a thorough evaluation of your environment, including virtual servers or bare-metal servers intended for migration. This evaluation will:
- Assess the Environment: Review the OS versions, disk sizes, link speeds, applications, and any additional services.
- Identify Compatibility Issues: Determine potential compatibility issues and estimate the required downtime.
- Document Findings: Document all findings to effectively plan the migration process.
7. What is the migration method used by Wanclouds?
Wanclouds primarily uses the "Lift and Shift" method for migration, which involves:
- Creating an Image Template/Backup: An image template or backup of the source environment is created, temporarily shutting down the server.
- Transferring the Image: The image is transferred to the target data center.
- Restoring the Environment: The image is restored on new servers in the target data center.
Note that Wanclouds does not provide application deployment services, and any required IP-level changes at the application level must be managed by the customer. Custom migration approaches may be necessary for EOL operating systems, which carry risks due to incompatibility with new hardware.
8. How does Wanclouds use IBM’s Managed Image Template Utility during migration?
For supported operating systems on virtual servers (VSIs):
- Image Creation and Transfer: The virtual server will be temporarily down during image creation. The image is then transferred and deployed onto a new VSI in the target data center.
9. What is the typical migration timeline with Wanclouds?
- Virtual Server Instances (VSIs):
- 100 GB OS Drive, No Secondary Drive (Link Speed 100 Mbps):
- Downtime: Approximately 1 hour
- Total Migration Process: 3-4 hours
- 100 GB OS Drive, 100 GB Secondary Drive (Link Speed 100 Mbps):
- Downtime: 3 hours
- Total Migration Process: 3-4 hours
- Disk Sizes of 200 GB or More:
- The provided times are estimates based on a 100 GB disk size. Larger disk sizes should be calculated using the 100 GB example as a reference.
- Bare Metal Servers (BM):
- 1 TB OS Drive, No Secondary Drive (1 Gbps Link Speed):
- Downtime: 8-12 hours
- Total Migration Process: 18-24 hours
- 1 TB OS Drive, 1 TB Secondary Drive (1 Gbps Link Speed):
- Downtime: 8-12 hours
- Total Migration Process: 30-36 hours
- Disk Sizes of 2 TB or More:
- The provided times are estimates based on a 1 TB disk size. Larger disk sizes should be calculated using the 1 TB example as a reference.
- General Notes:
- Pre-Migration Assessment: Takes 1-2 days
- Migration Method: Involves creating an image of the source environment, transferring it, and deploying it on new servers
- Post-Migration Testing: Required to ensure all applications and services are running correctly
10. What downtime and business impact should I expect during migration?
- Expected Downtime: Downtime varies depending on the size and type of migration.
- Business Impact: Wanclouds will make every effort to minimize downtime and its impact on your business operations.
11. What steps should I take after the migration is complete?
After the migration, the following steps are necessary:
- Customer Verification: Verify that all applications, data, and services are functioning correctly on the new server.
- DNS Switching: Once verified, you need to switch the DNS to point to the new server. Wanclouds does not assist with DNS switching; this must be handled by the customer.
- Software Licensing: Apply any required software licenses on the new server.
- Application Configuration: Configure applications to work with the new IP addresses, as Wanclouds does not manage application-level changes or configurations.
Data Center Migrations
As part of the data center modernization strategy for IBM Cloud®, a data center is scheduled to consolidate. IBM Cloud invests significantly in data center infrastructure. These investments include rolling out newer data centers and multizone regions (MZRs) designed to deliver a more resilient architecture with higher levels of network throughput and redundancy. For more information, see [Locations for resource deployment](#).
Part of this modernization strategy is to consolidate select data centers. As this transition approaches, help is available to assist you in your migration to modern data centers. For more information, see [Migrating resources to a different data center](#) or [FAQs for migrating resources to a different data center](#).
The following table shows the location with the associated data center that is set to close, and the recommended data centers to which you can migrate.
| Location | Data Center to Close | Recommended Data Centers | Migration Deadline |
|----------|----------------------|--------------------------|--------------------|
| Dallas | DAL09: POD3 and POD4 | DAL10, DAL12, DAL13 | 4 March 2025 |
Table 1. Data center that is closing in 2025.
The following table describes important dates that you need to be aware of if you have services that run in the data centers that are set to close. You will receive notifications if you have services in the data centers that are closing.
| Date | Data Center Migration Milestone |
|----------------|-----------------------------------------------------------------------------------|
| 06 August 2024 | Official announcements for all impacted customers in DAL09 - POD3 and POD4 |
| 06 August 2024 | No new account provisioning in impacted datacenter |
| 14 October 2024| Last date to provision services in impacted datacenter |
| 05 February 2025 | Network maintenance DAL09 - POD3 and POD4. 10:00 AM US Eastern Time (14:00 GMT) |
| 10 February 2025 | Last date to submit migration assistance request |
| 04 March 2025 | Last date to migrate before consolidation of DAL09 - POD3 and POD4 datacenter begins|
Table 2. Timeline for data center migration.
---
Legacy IBM Cloud Managed Services Offering Transition
IBM Cloud is transitioning its Managed Services support offering, a legacy offering, from managed by IBM to managed by Wanclouds, an IBM partner, or to self-managed by the client. The effective date for this change is 1 January 2025. In most cases, changes may be minor and should not interrupt your operations.
If you use the Managed Services offering, you should open a support case in the Support Center no later than 1 November 2024 to provide information on which transition option you choose and to coordinate with IBM on the changes. IBM requires a minimum of eight weeks to transition or enable the clients, either opting in or out, so IBM requests your decision by 1 November.
As this transition approaches, help is available to assist you. If you have questions, contact us by opening a support case or by reaching out to your IBM representative in Technical Account Management (TAMs), Client Services (CSMs), or Sales for assistance.
Transition Approach and Options
You have the following options for this transition:
- Opt in to partner with Wanclouds and transition your management of a managed hosting environment to Wanclouds.
- Opt out to a self-managed environment that you manage yourself.
- Opt out to a self-managed environment with a self-appointed business partner.
Timeline
The following table describes important dates that you need to be aware of if you use the Managed Services support offering.
| Date | Managed Services Transition Milestone |
|--------------------|----------------------------------------------------------------------------------------------------|
| 26 August 2024 | Affected clients received a notification dated 26 August 2024 with information on this change. |
| 1 November 2024 | Last date to open a support case with your transition decision. |
| 30 November 2024 | Last date to add servers to the existing setup and have them be included in the transition. |
| 1 January 2025 | IBM no longer provides a managed level of support for server, security, storage, and database management services that were in scope as part of the Managed Services offering. <br>IBM is no longer able to log into your devices with IBM’s administrative domain accounts. IBM demotes and removes its controllers and services from the client’s domain as part of the decommission. <br>Any remaining billing for the Managed Services offering ends. Billing for devices continues within the normal IBM billing process. <br>Any remaining domain-based resources that were not transitioned might begin to experience failures because the infrastructure will be disabled. <br>If you opt in to partnering with Wanclouds, the domain moves to and remains on the Wanclouds controllers without impact or interruption. <br>If you opt out to a self-managed environment, the responsibility for managing the devices reverts solely to you as self-managed devices. |
Table 3. Managed Services transition milestones.
---
Steps for Opting In with Wanclouds
You can expect the following tasks to be part of the transition to partnering with Wanclouds:
1. The client will confirm their intent to opt in via a proactive support case.
2. As soon as the case is created, TAMs/CSM will reach out to the clients and initiate a kickoff call along with IBM & Wanclouds.
3. IBM will configure servers that Wanclouds deploy on their own account and promote them to Domain Controllers for the management domain, which will automatically sync the current domain information to them and propagate any changes that may be made throughout the customer transition process.
4. This approach ensures that clients and IBM won’t have to recreate any users or worry about passwords being correct, as the current/existing information will be synced and will remain the same (similar to the existing client setup).
5. IBM will work with clients and Wanclouds to ensure any access or rules that may need to be created or updated to allow Wanclouds’ admins the ability to manage these devices for the customer, as well as to set up and verify network communication between the customer’s environment and Wanclouds.
6. To enact the transition, at a time scheduled with the client, IBM will assign the customer’s subnets to a zone defined within the management domain for the Wanclouds controllers and change the DNS servers on the customer’s servers to point to the Wanclouds controllers.
7. These actions do not require a reboot or cause interruption, and IBM will verify proper connectivity between the customer and Wanclouds servers beforehand.
8. IBM will not remove anything on its end, so in the unlikely event of unexpected results, reverting for investigation, if necessary, will be seamless.
9. IBM will also remove its monitoring agents (Zabbix), which will not impact servers.
Effective January 1st, 2025: IBM will no longer be able to log into client devices with Administrative domain accounts, and any remaining domain-based resources will begin to experience failures (if the above steps are not executed) as the infrastructure will be disabled.
On January 31st, 2025: IBM will demote and remove our controllers and servers from the domain as part of our decommission, and the domain will remain on the Wanclouds controllers without impact or interruption.
For more details on Wanclouds' introduction and the scope that they will be offering as part of the transition, see [Empower Your Business with Multi-Cloud Managed Service by Wanclouds](#).
---
Steps for Opting Out to Self-Managed
You can expect the following tasks to be part of the transition to a self-managed environment:
1. The client will confirm their intent to opt out via a proactive support case.
2. As soon as the case is created, TAMs/CSM will reach out to the clients and initiate a kickoff call along with IBM & Wanclouds.
3. For clients opting out, there will be a discovery phase to understand the client setup and plan the next steps.
4. Once the client confirms their intent to opt out, IBM will work with the client to identify which users are active on which servers and where they need to be recreated locally.
5. Clients will need to identify what resources, such as users, they are using from our management Active Directory domain, mhsl01.mhsl.local (already known to the client), and replace them with their own users.
6. IBM will provide a list of users that have been created for each account, as requested.
7. Once confirmed and acknowledged by the client, IBM will remove the servers from our domain, and the client will be able to self-manage these servers based on the users created locally.
8. For Windows servers, moving the servers from the IBM domain will require at least one reboot. For Linux, it is not necessary, but IBM recommends scheduling a reboot for validation.
9. Clients who already have their own domain servers configured will not be required to go through the above steps of user identification and domain transfer.
10. IBM will also remove its monitoring agents (Zabbix), which will not impact servers.
11. Once the client confirms and acknowledges the above steps, the fees for the monitoring service will be removed.
Effective January 1st, 2025: IBM will no longer be able to log into client devices with Administrative domain accounts, and any remaining domain-based resources will begin to experience failures as the infrastructure is disabled.
On January 31st, 2025: IBM will demote and remove our controllers and servers from the domain as part of our decommission, and the domain will remain on the Wanclouds controllers without impact or interruption.
---
FAQs About Managed Services Transition
The following are FAQs about managed services transition.
FAQs About Managed Services Migration
1. What does the migration of Managed Services mean for clients?
Effective January 1st, 2025, the Managed Services support offering will migrate from being managed by IBM to being managed by our trusted partner, Wanclouds. This migration involves only the management of devices, which will remain with IBM. There will be no changes to existing customer accounts, except that the managed services line item will be excluded from the invoice. Clients have the following migration options:
1. Opt-in for the migration to Wanclouds.
2. Opt-out of the migration and be enabled by IBM to self-manage the environment.
3. Opt-out and appoint their own provider to manage the environment. IBM will work with the new partner to facilitate the migration.
IBM will ensure that all existing clients are seamlessly migrated, regardless of their selected option.
2. Will the migration to Wanclouds incur any additional charges to clients?
No, the migration to Wanclouds and the continued operation of the existing environment will not incur any additional costs for clients.
3. Will the migration change the existing scope or deliverables for Managed Hosting Service Management?
No, the migration will not change the existing scope or deliverables. Wanclouds has agreed to provide the same scope and deliverables currently offered by IBM.
4. How do I contact Wanclouds after the migration if I have any issues?
After the migration, you can contact Wanclouds via:
- Website: [support.Wanclouds.net](https://support.Wanclouds.net)
- Email: support@Wanclouds.net
- Phone: +1 (415) 347-6739
5. How will the migration and engagement with Wanclouds proceed?
Once a client confirms their decision to opt-in for the migration, IBM will set up a call with Wanclouds and will collaborate with both the client and Wanclouds to ensure a seamless migration process.
6. What if I do not opt for the migration to Wanclouds?
If a client decides not to opt-in for the migration, IBM will set up a call to guide them and ensure they are enabled to self-manage the environment without IBM’s involvement.
7. How will clients communicate their decision to opt-in or opt-out of the migration?
Clients can communicate their decision regarding the migration by opening a proactive support center case with IBM or by contacting their IBM account or sales representative.
8. What will be the impact on my infrastructure due to the migration?
The migration will not impact client devices or infrastructure, which will remain with IBM. Clients will have the option to contact Wanclouds or a self-appointed provider for any issues related to device management. For device or infrastructure-related issues, clients will continue to follow the existing case management process with IBM.
9. Will the migration change how customers use their devices?
No, the migration will not change how customers use their devices. Some settings may need to be adjusted to facilitate the migration, but these changes will not be noticeable to customers.
10. How will customers get support after the migration to Wanclouds?
After the migration, customers can engage Wanclouds directly for managed service-related requests or issues. Devices will remain on the customer’s IBM Cloud account, and customers will continue to engage IBM as they currently do.
11. Once a customer migrates to Wanclouds, how will billing be handled?
As part of the migration, IBM will remove billing for the managed service from the customer’s IBM Cloud account, but device billing will continue as usual. The customer will need to set up separate billing with Wanclouds or another provider for managed services.
12. The managed service included 60GB of IBM Cloud Backup storage at no additional cost. What happens to that storage if a customer opts not to migrate?
If a customer opts not to migrate, they will still be able to retain the 60GB of IBM Cloud Backup storage as part of the transition.
13. What if I need to add a new device or server to my existing setup before the migration?
Clients can add servers to their existing setup by November 30th, 2024, before the migration. IBM requests that clients inform them of any additions with as much notice as possible to ensure a seamless migration.
14. Do I have to wait until January 1st, 2025, for the migration to Wanclouds?
No, IBM and Wanclouds require eight weeks from the client’s opt-in decision to complete the migration. The migration can occur sooner, depending on the client’s needs.
Wanclouds Bot - Migration Information Documentation
Supported Operating Systems
Wanclouds supports only the following operating systems, as IBM supports these versions exclusively:
Windows: 2016 or later editions
Ubuntu: 20 or later all editions
Debian: 11 or later all editions
Red Hat Enterprise Linux (RHEL): 8 or later all editions
CentOS: 8 or later editions for Bare Metal, 9 for VSI
If a customer has a version other than those mentioned above and asks for migration details, please mention that Wanclouds does not support EOL operating systems, but we can review the situation and discuss it during a call.
Non-supported Versions (EOL):
Windows: 2012 or 2012 R2
Ubuntu: 14, 16, 18
Debian: 8, 9, 10
Red Hat: 7
CentOS: 7 for BM, 8 & 9 for VSI
Note: Custom migration approaches may be needed for end-of-life (EOL) operating systems, which may carry risks due to incompatibility with new hardware</migration_guide>"""
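# Forced-choice tool schema used to check whether the user has confirmed the migration request before any action is taken.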
confirmation_tool = {
"name": "confirmation_tool",
"description": "This tool Analyze the chat history to determine if user confirmation was requested and given before scheduling the call. Set confirmation to true if user agreed, false if confirmation wasn't asked or user disagreed.",
"input_schema": {
"type": "object",
"properties": {
"Confirmation": {
"type": "boolean",
"description": "Confirmation flag that is only True when user confirms the details and is ready to move forward. Analyze user last response. Don't get hallucinated by history"}
},
"required": [
"Confirmation"
]
}
}
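# Tool schema that asks the model to draft the email subject and body summarizing the collected migration details.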
email_tool = {
"name": "email_tool",
"description": "You are email write expert who extra all information from chat history and create email body to our support agent to provide all details which customers provided. Write email from migration chatbot who collected requirements and now putting request to Wanclouds and will keep user in cc",
"input_schema": {
"type": "object",
"properties": {
"email_body": {
"type": "string",
"description": "Email body covering all of following detail Current DataCenter \nTarget Datacenter\nMigration Type\nBare metal to migrate\nVSIs to migrate\nStorages to Migrate\nFirewall to Migrate\nLoad Balancer to Migrate\nCustomer Call Availability. Also add user information who requested to put migration request"
},
"subject": {
"type": "string",
"description": "Subject of the email"
},
},
"required": [
"email_body",
"subject"
]
}
}
|
CloudWhisperCustomBot | app/whisper/utils/migration_action_engine/prompt.py | SYSTEM = """You are an AI support agent for Wanclouds, a partner of IBM Cloud, assisting with datacenter migrations as part of IBM's Datacenter Modernization initiative. Your primary role is to help customers affected by the closure of legacy datacenters, such as Dal09 (Dallas 9), by collecting essential information for the Wanclouds migration team.
<general-instructions>
1. Always write clear, specific queries for the database. Include all necessary information and avoid ambiguity. Examples of what to avoid:
- Vague queries like "Give me details."
- Ambiguous questions like "What are the companies of this user?" as the db_tool has no context of "this user."
2. Don’t populate values or make assumptions.
3. Present all information to the user in a readable format. Use bullet points or numbered steps when presenting information or instructions to the user, and format your answer properly.
4. To interact with the `db_tool`, provide a natural language query. The `db_tool` will convert the query to a database query and execute it against the database to retrieve the relevant information. Please describe your request in plain language, focusing on the information you need rather than the specific database structure or query syntax.
<example>{{query = "What are the names of all the companies"}}</example>
5. Break down complex requests into smaller, manageable steps
6. Use the `db_tool` judiciously and only when necessary to avoid overloading the user with irrelevant information.
7. Analyze previous messages to understand the context and utilize any relevant information present in the conversation history.
8. Take a break after calling db_tool consecutively 3 times
9. Don't hallucinate if you get an empty result for any query.
10. The db_tool returns both the cypher query and the result. From that you can develop an understanding of what the db_tool was actually able to find for your query.
11. There are two types of fields in region: 'name' and 'display name'. The 'name' field contains abbreviations of regions (e.g., us-south), while the 'display name' field contains full region names (e.g., Dallas). If a user asks about any resource related to a region and you get an empty response, fetch all available regions and display them to the user. Then, ask the user to select from the provided regions to give it another try.
12. Maintain a professional tone in your responses. Do not mention the database or db_tool to the user, as they are not aware of the internal implementation. In case of an empty response, simply inform the user that you couldn't find any resource matching their query.
13. You can use qna for general queries. It has all documentation of migration related queries.
14. Classic Cloud account is not a resource of a datacenter.
</general-instructions>
<cost_associated_with_migrated>
This service is free for all legacy datacenters, like Dal09, but not for others.
</cost_associated_with_migrated>
<database-schema>
{schema}
</database-schema>
<current_time>
Current time in UTC: {current_time}
</current_time>
<requirements_to_collect>
1. Source datacenter identification:
- Ask the user which datacenter they're currently using.
- Verify if the datacenter has any resources using the db_tool.
2. Target datacenter selection:
- If the user hasn't specified a target, recommend DAL10, DAL12, or DAL13.
3. Infrastructure type:
- Ask if the user wants to migrate to Classic or VPC infrastructure.
- Provide a brief explanation of each if needed.
4. Resource identification:
- Request a list of specific resources the user wants to migrate.
- Use the db_tool to verify the existence of these resources in the source datacenter.
5. Scheduling:
- Ask for the user's preferred meeting time and date.
- Use the current_time to suggest appropriate time slots within business hours.
- Confirm the scheduled time with the user.
Additional concerns:
Ask if the user has any other questions or concerns about the migration process.
</requirements_to_collect>
Follow this format for each interaction:
Question: [Summarize the user's question or latest response]
Thought: [Analyze the current situation, considering: Which requirements have been collected so far? What's the next logical step based on the user's response? Do I need to use the db_tool to verify any information? How can I maintain a natural conversation flow?]
Response: [Craft a precise, professional response, keeping the user's question and conversation in mind, without mentioning internal functionality]"""
CONFIRMATION = """You are a helpful assistant tasked with analyzing the chat history of the previous step. In the previous interaction, a bot collected parameters from the user to perform an action. The bot had access to two tools: a db_tool for retrieving information from a database, and an Action tool for performing tasks.
Your job is to determine whether the bot explicitly sought and received confirmation from the user before proceeding with the action. Focus specifically on the last message exchange right before the bot called the action tool. Check if the bot confirmed with the user by saying something like "Okay, I'm going to perform the action with the following parameters" and then listed the parameters.
Strictly adhere to the following output format:
Thought: [think step by step here to analyze chat history and latest query]
Confirmation: [True/False]
Explanation: [Provide a brief explanation of your determination. If confirmation was not sought or received, explain what was missing. If confirmation was obtained, describe how it was done, including any parameters that were listed.]"""
|
CloudWhisperCustomBot | app/whisper/utils/migration_action_engine/__init__.py | from app.whisper.utils.migration_action_engine.base import MigrationActionPhaseClaude
__all__ = ['MigrationActionPhaseClaude'] |
CloudWhisperCustomBot | app/whisper/utils/migration_action_engine/base.py | from datetime import datetime
import json
import copy
import mailchimp_transactional as MailchimpTransactional
from mailchimp_transactional.api_client import ApiClientError
import requests
from anthropic import APIConnectionError, RateLimitError, APIStatusError
from loguru import logger
# from jsonschema import validate, ValidationError
from app.core.config import neo4j_driver
from app.whisper.llms.anthropic import AnthropicLLM
from app.whisper.utils.migration_action_engine.consts import (ibm_region_mapper, confirmation_tool,
email_tool)
from app.whisper.utils.migration_action_engine.prompt import SYSTEM
from app.whisper.utils.neo4j.client import Neo4j
from app.core.config import settings
class MigrationActionPhaseClaude():
def __init__(self, intent: str, chat_history: list, bearer: str, user_dict: str, metadata_dict, action_id, cloud_id):
self.base_llm = AnthropicLLM()
self.base_llm.messages = chat_history # it adds messages in self.base_llm
self.endpoint = intent[-1]['method']['endpoint']
self.api_function_name = intent[-1]['method']['tool']['name']
self.api_function_schema = copy.deepcopy(intent[-1]['method']['tool']['input_schema'])
self.bearer = bearer
self.tools = self._get_tool_from_intent(intent)
self.user_name = user_dict['name']
self.user_email = user_dict['email']
self.user_id = user_dict['id']
self.response_id = metadata_dict.get('response_id', '')
self.confirm_execution = metadata_dict.get('confirm_execution')
self.confirm_in_tool = metadata_dict.get('confirm_in_tool')
self.tool_force = None
self.func_args = metadata_dict.get('payload', {})
self.metadata_dict = metadata_dict
self.action_id = action_id
self.cloud_id = cloud_id
def _get_tool_from_intent(self, intent):
tools = [intent[-1]['method']['tool']] # Assumes intent[-1] is the highest priority
db_tool = {
"name": "db_tool",
"description": "This is a helper tool. It assists in finding and validating information from databases. "
"Use this as much as possible to fill out parameters of the tool '{function_name}'."
" Find out information from Knowledge graph database.",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'What are the names of all the companies'"}
}
}
}
qna_tool = {
"name": "qna_tool",
"description": "This is a helper tool. It is used to answer questions related to migration. ",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'Tell me more migration offering'"}
}
}
}
description = db_tool["description"]
updated_description = description.format(function_name=self.api_function_name)
db_tool["description"] = updated_description
tools.append(db_tool)
tools.append(qna_tool)
return tools
async def start(self, query: str):
response = self.get_response_from_llm(query)
content = await self.process_response(response)
return content
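# Drive one LLM turn. If execution was previously confirmed, force the confirmation_tool; on a positive answer, force the email_tool, send the drafted request email, and then hand control back to the normal tool loop.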
def get_response_from_llm(self, query=''):
if query:
logger.info(f"ADDING USER QUERY -> role='user', content={query}")
self.process_user_query(query=query)
try:
with neo4j_driver.session() as session:
neo4j_client = Neo4j(db_session=session, user_id=self.user_id)
self.schema = neo4j_client.generate_schema(user_id=self.user_id, migration=True)
if self.metadata_dict.get('confirm_execution'):
tools = [tool for tool in self.tools]
tools.append(confirmation_tool)
logger.info(tools)
with neo4j_driver.session() as session:
neo4j_client = Neo4j(db_session=session, user_id=self.user_id)
self.schema = neo4j_client.generate_schema(user_id=self.user_id, migration=True)
current_datetime = datetime.now()
readable_format = current_datetime.strftime("%Y-%m-%d %H:%M:%S")
chat_response = self.base_llm.process(tools=tools,
system=SYSTEM.format(schema=self.schema, current_time=readable_format),
force_tool=True,
tool_name="confirmation_tool")
confirmation = chat_response['content'][-1]['input']["Confirmation"]
self.metadata_dict['confirm_execution'] = False
logger.info(confirmation)
if confirmation:
self.base_llm.messages[-1]['content'] = f"""The user has confirmed their choice. Now, let's create an email from the migration chatbot to the migration team. The email should inform the team about the user who is submitting the request. Include the following user details in the email
Name: {self.user_name}
Email: {self.user_email}
Please draft an email using this information, addressing it to the migration team and explaining that a new migration request has been received"""
tools = [tool for tool in self.tools]
tools.append(email_tool)
with neo4j_driver.session() as session:
neo4j_client = Neo4j(db_session=session, user_id=self.user_id)
self.schema = neo4j_client.generate_schema(user_id=self.user_id, migration=True)
current_datetime = datetime.now()
readable_format = current_datetime.strftime("%Y-%m-%d %H:%M:%S")
chat_response = self.base_llm.process(tools=tools,
system=SYSTEM.format(schema=self.schema, current_time=readable_format),
force_tool=True,
tool_name="email_tool")
logger.info(chat_response)
email_body = chat_response['content'][-1]['input']["email_body"]
subject = chat_response['content'][-1]['input']["subject"]
mailchimp = MailchimpTransactional.Client(settings.email.MANDRILL_API_KEY)
message = {
"from_email": settings.email.MAIL_USERNAME,
"subject": subject,
"text": email_body,
"to": [{'email': 'engg@wanclouds.net'}]
}
try:
response = mailchimp.messages.send({"message": message})
logger.info('Send email called successfully: {}'.format(response))
except ApiClientError as error:
logger.error('An exception occurred: {}'.format(error.text))
self.base_llm.messages[-1]['content'] = 'The user confirmed their choice and an email has been sent to the Wanclouds Migration team. Please let the user know.'
current_datetime = datetime.now()
readable_format = current_datetime.strftime("%Y-%m-%d %H:%M:%S")
system = [
{
"type": "text",
"text": SYSTEM.format(schema=self.schema, current_time=readable_format)
}
]
# if len(self.base_llm.messages) == 1:
# chat_response = self.base_llm.process(tools=self.tools, system=system, prompt_caching=True, force_tool=True, tool_name='db_tool')
# else:
chat_response = self.base_llm.process(tools=self.tools, system=system)
logger.info(chat_response)
if not chat_response:
logger.info("Received empty chat response")
return chat_response
except APIConnectionError as e:
logger.info(f"API connection error: {e}")
raise
except RateLimitError as e:
logger.info(f"Rate limit error: {e}")
except APIStatusError as e:
logger.info(f"API status error: {e}")
logger.info(f"Status code: {e.status_code}, Response: {e.response}")
# def validate_payload(self, payload):
# missing_keys = []
# schema = self.api_function_schema
# def check_required_fields(schema_part, payload_part, path=""):
# if isinstance(schema_part, dict):
# if "required" in schema_part:
# for field in schema_part["required"]:
# if field not in payload_part:
# missing_keys.append(f"{path}.{field}" if path else field)
#
# if "properties" in schema_part:
# for prop, prop_schema in schema_part["properties"].items():
# if prop in payload_part:
# new_path = f"{path}.{prop}" if path else prop
# check_required_fields(prop_schema, payload_part[prop], new_path)
#
# if "anyOf" in schema_part:
# any_valid = False
# for sub_schema in schema_part["anyOf"]:
# sub_missing = []
# check_required_fields(sub_schema, payload_part, path)
# if not sub_missing:
# any_valid = True
# break
# if not any_valid:
# missing_keys.append(f"{path} (at least one condition must be met)")
#
# check_required_fields(schema, payload)
#
# try:
# validate(instance=payload, schema=schema)
# if not missing_keys:
# print("Validation successful!")
# return True, []
# else:
# print("Validation failed due to missing keys.")
# return False, missing_keys
# except ValidationError as e:
# print(f"Validation error: {e}")
# return False, missing_keys
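# Substitute any path_params into the endpoint URL, drop them from the request body, and POST the remaining arguments to the action API using the user's bearer token.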
def api_call(self):
args = json.loads(json.dumps(self.func_args))
logger.info(f"ARGS created finalized by LLM: {args}")
if args.get('path_params'):
for key, val in args["path_params"].items():
self.endpoint = self.endpoint.replace('{' + key + '}', val)
temp = dict()
for key, val in args.items():
if key != 'path_params':
temp[key] = val
args = temp
logger.info(f"Making POST request to API with args: {args}")
logger.info(f"endpoint-> {self.endpoint}")
call_response = requests.post(
self.endpoint,
json=args,
headers={"Authorization": self.bearer}
)
call_response_for_llm = f"Status-code : {call_response.status_code}\nResponse : {call_response.text}"
logger.info('#' * 20)
logger.info(call_response_for_llm)
logger.info('#' * 20)
return call_response_for_llm
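# Interpret the LLM response: dispatch tool_use blocks to db_tool, qna_tool, or the action tool, and otherwise return the text response to the user.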
async def process_response(self, response):
from app.main import app
if not response:
raise Exception("Invalid response from llm")
logger.info(response)
response_content = response['content']
if response['stop_reason'] == 'tool_use':
for single_response in response_content:
if single_response['type'] == 'text':
if 'Confirmation Flag: True' in single_response['text']:
self.confirm_in_tool = True
self.process_assistant_query(response_content)
if single_response['type'] != 'tool_use':
continue
if self.confirm_in_tool:
call_response_for_llm = self.api_call()
self.process_tool_result(single_response['id'], call_response_for_llm)
new_response = self.get_response_from_llm()
return await self.process_response(new_response)
self.process_assistant_query(response_content)
function_args, function_name = single_response['input'], single_response['name']
logger.info(f"function ARGS: {function_args}")
logger.info(f"function name: {function_name}")
if function_name == 'db_tool':
question = function_args['query']
if any(keyword in question for keyword in ['MATCH', 'match', 'SELECT']):
feedback = f'Internal feedback: The function argument you generated for db_tool was {question},\
but it appears to be in an incorrect format. To ensure proper functionality, please generate \
a natural language query for the db_tool instead of using Cypher query language. Respond with \
a valid JSON object containing a single key named "query" and its corresponding value as a \
string. The value should represent the natural language query you generated based on the \
given question.'
self.process_tool_result(tool_id=single_response['id'], content=feedback, is_error=True)
break
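# Normalize human-friendly region names (e.g. 'Dallas') in the query to IBM region codes (e.g. 'us-south') before querying the graph database.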
regions = ibm_region_mapper.keys()
question_lower_case = question.lower()
for region in regions:
region_lower = region.lower()
if region_lower in question_lower_case:
question = question_lower_case.replace(region_lower, ibm_region_mapper[region])
neo4j_client = Neo4j(db_session=app.state.neo4j_session, user_id=self.user_id, cloud_id=self.cloud_id)
result, cypher = neo4j_client.run(
question=f'{question}'
)
self.process_tool_result(
tool_id=single_response['id'],
content=f"cypher: {cypher} and result:{result}")
elif function_name == 'qna_tool':
from app.web.common.chats_websockets_utils import get_base_bot_response
payload = {"question": function_args['query'], "messages": []}
assistant_message = ""
try:
async for chunk in get_base_bot_response(payload):
response_lines = chunk.splitlines()
for line in response_lines:
if line.startswith("data:"):
chunk = line.split(": ")[1]
assistant_message += chunk
if not assistant_message.strip():
raise ValueError("Received an empty response from the assistant")
except Exception as e:
logger.error(f"An error occurred in execute_qna: {str(e)}")
raise e
self.process_tool_result(
tool_id=single_response['id'],
content=assistant_message.strip())
elif function_name == self.api_function_name:
try:
# is_valid, missing_keys = self.validate_payload(function_args)
# missing_keys = True
# logger.info(f"{missing_keys}")
# if missing_keys:
# logger.info(f"The following keys are missing: {missing_keys}")
# missing_keys_error = f'''The following keys are missing from the provided payload: {missing_keys}. Please fill out missing information before proceeding forward'''
# self.process_tool_result(single_response['id'], missing_keys_error, is_error=True)
# break
self.metadata_dict['confirm_execution'] = True
confirmation_llm = AnthropicLLM()
confirmation_llm.add_message(role='user', content=f'''here is the payload enclosed in double\
quotes\n"{function_args}" your job is to show this to the \
user in a pretty way in the following format not similar to payload \
and ask for confirmation if all the \
information is correct. if all the information is correct user should say 'yes' \
if something needs changing user should say 'no'. Start your response to the user: ''')
message_str = confirmation_llm.process()
self.process_tool_result(tool_id=single_response['id'], content='Before moving ahead took this confirmation from user')
self.process_assistant_query(message_str)
return message_str.replace('\n', '<br>')
except APIConnectionError as e:
logger.info(f"API connection error: {e}")
raise
except RateLimitError as e:
logger.info(f"Rate limit error: {e}")
except APIStatusError as e:
logger.info(f"API status error: {e}")
logger.info(f"Status code: {e.status_code}, Response: {e.response}")
else:
logger.info(f"Wrong function called : {function_name}")
# Fetching new response after handling tool call
new_response = self.get_response_from_llm()
return await self.process_response(new_response)
elif response_content and response_content[0].get('text'):
logger.info("Processing user control response")
self.process_assistant_query(response_content)
if 'Confirmation Flag: True' in response_content[0]['text']:
call_response_for_llm = self.api_call()
chat_response = self.get_response_from_llm(call_response_for_llm)
return await self.process_response(chat_response)
user_message = response_content[0]['text']
if 'Thought:' in user_message and 'Response:' not in user_message:
logger.info("******** Response key is missing in response, Retrying to get response from LLM *********")
self.base_llm.add_message(role='user',
content=f'Internal Feedback: You did not respond correctly and missed'
f' generating response in Response: key. Do not generate it as a tag'
f' like <Response> instead generate the response with key Response:')
response = self.get_response_from_llm()
return await self.process_response(response)
elif "Response:" in user_message:
user_message = user_message.split('Response:')[1]
return user_message
else:
return user_message
else:
raise Exception(f"Unexpected response format {response}")
def process_user_query(self, query):
self.base_llm.add_message(role="user", content=query)
def process_assistant_query(self, query):
self.base_llm.add_message(role="assistant", content=query)
def process_tool_result(self, tool_id, content, is_error=False):
content = [{
"type": "tool_result",
"tool_use_id": tool_id,
"content": content,
"is_error": is_error
}]
self.base_llm.add_message(role="user", content=content)
|
CloudWhisperCustomBot | app/whisper/utils/neo4j/prompt.py | SYSTEM = """You are a helpful Neo4j assistant who generates intelligent Cypher queries to help the user find information from a Neo4j graph database based on the provided schema definition, without giving any explanation.
<instructions>Strictly follow these instructions:
1. Use only the provided relationship types and properties.
2. Always query across all properties for the given search term using the 'any' key.
3. Do not include any explanations or apologies in your responses.
4. Make sure to return id and names in all queries
5. Provide a clear explanation of the relationships between the nodes:
- Identify the starting node and the target node based on the user's question.
- Trace the path from the starting node to the target node, considering all the relationships and intermediate nodes along the way.
- If multiple relationship paths exist between nodes, select the shortest path.
- Explain each relationship and its direction, highlighting any intermediate nodes encountered.
- Prefer a direct relationship over paths through intermediate nodes
- Summarize the overall path and the connections between the nodes
6. Retrieve only the information asked for in the query. Avoid retrieving extra information.
7. Fetch only 20 records.
8. When projecting properties from related nodes, use the following syntax:
RETURN n, r.prop1 AS prop1, r.prop2 AS prop2
Instead of:
RETURN n, (n)-[:REL]-(r) {{.prop1, .prop2}}
9. If the user asks for all details, it doesn't mean you should start fetching all the neighboring relationships. Avoid creating large queries in such cases.
{clause}
</instructions>
<cypher_techniques>How to write a query:
A. If no specific param is mentioned, start with a simple query.
<example>
MATCH (n:Node)
WHERE n.cloud_id = '345678654325675432'
RETURN n.prop1, n.prop2, n.prop3, n.prop4, ...
LIMIT 20
</example>
B. If a specific attribute is mentioned, use regex or fuzzy matching:
<example>
MATCH (n:Node)
WHERE n.cloud_id = '345678654325675432'
AND ANY(key IN keys(n) WHERE n[key] IS NOT NULL AND toString(n[key]) =~ '(?i).*searchTerm.*')
RETURN n.prop1, n.prop2, n.prop3, n.prop4, ...
LIMIT 20
</example>
C. When querying relationships:
<example>
MATCH (n:Node1)-[r:RELATIONSHIP]->(m:Node2)
WHERE n.cloud_id = '345678654325675432'
RETURN n.prop1, n.prop2, m.prop3, m.prop4, r.property AS relationshipProperty
LIMIT 20
</example>
D. Using shortestPath with cloud_id:
<example>
MATCH (n1:Node1), (n2:Node2)
WHERE n1.cloud_id = '345678654325675432' AND n2.cloud_id = '345678654325675432'
MATCH p = shortestPath((n1)-[*]-(n2))
RETURN n1.prop1, n1.prop2, n2.prop3, n2.prop4, [node IN nodes(p) | node.prop] AS pathNodes
LIMIT 20
</example>
</cypher_techniques>
<complex_query_techniques>
1. For queries involving multiple node types and relationships:
<example>
MATCH (n1:Node1)-[:REL1]->(n2:Node2)-[:REL2]->(n3:Node3)
WHERE n1.cloud_id = '345678654325675432'
RETURN n1.prop, n2.prop, n3.prop
LIMIT 20
</example>
2. For aggregations and grouping:
<example>
MATCH (n:Node)-[:RELATIONSHIP]->(m:RelatedNode)
WHERE n.cloud_id = '345678654325675432'
RETURN n.name, COUNT(m) AS relatedCount, COLLECT(m.name) AS relatedNames
LIMIT 20
</example>
3. For complex filtering:
<example>
MATCH (n:Node)
WHERE n.cloud_id = '345678654325675432'
AND (n.property1 > 100 OR n.property2 IN ['value1', 'value2'])
AND EXISTS((n)-[:SOME_RELATIONSHIP]->())
RETURN n.prop1, n.prop2, n.prop3, n.prop4, ...
LIMIT 20
</example>
4. For queries involving date/time comparisons:
<example>
MATCH (n:Node)
WHERE n.cloud_id = '345678654325675432'
AND n.createdAt > datetime() - duration('P7D')
RETURN n.prop1, n.prop2, n.prop3, n.prop4, ...
LIMIT 20
</example>
5. For queries requiring subqueries:
<example>
MATCH (n:Node)
WHERE n.cloud_id = '345678654325675432'
AND n.property IN [x IN RANGE(1,10) WHERE x % 2 = 0]
RETURN n.prop1, n.prop2, n.prop3, n.prop4, ...
LIMIT 20
</example>
</complex_query_techniques>
<query_optimization_tips>
1. Use appropriate indexes when available.
2. Prefer MATCH over WHERE for relationship existence checks.
3. Use OPTIONAL MATCH for left outer joins.
4. Use CASE statements for conditional logic within the query.
5. Use UNWIND for working with lists efficiently.
6. Consider using CALL {{}} IN TRANSACTIONS for very large operations.
7. Use DISTINCT when necessary to remove duplicates, but be aware of performance implications.
</query_optimization_tips>
<database_schema>
I'm providing the database schema to help you understand what data exists in the database, enabling you to intelligently retrieve information from it.
{schema}
</database_schema>
<similar_usecases>
The following are examples of past user queries and their neo4j queries. Use these as general guidance for query structure and techniques, but do not copy them directly. Always construct your query based on the current question and schema. Adapt these concepts to fit the specific requirements of each new query.
{success_queries}
</similar_usecases>
Strictly provide output in the following format:
Question: [Understand the user's question]
Nodes: [Identify the nodes required to find out information for the question without hallucinating]
Relationships: [Critically dig out relationships between relevant nodes step-by-step from provided database schema without hallucinating]
Explanation: [Provide a clear explanation of how the nodes are connected, including any intermediate nodes and the direction of the relationships]
Shortest_Path: [If you think the shortest path algorithm is required, then plan out efficiently to determine nodes and relationships]
Thought: [think step by step to construct query using information in previous steps]
Cypher: [Generate the Cypher query without hallucinating]
Enhanced Cypher: [Enhance previous query by searching through all properties everywhere and use Regex/levenshteinSimilarity/phonetic/metaphone]"""
PAGINATED_SYSTEM = """You are a helpful Neo4j assistant who generates intelligent Cypher queries to help the user find information from a Neo4j graph database based on the provided schema definition, with a focus on pagination. Do not give any explanation unless specifically asked. You'll use the chat history to determine which records to fetch next.
<instructions>Strictly follow these instructions:
1. Use only the provided relationship types and properties.
2. Always query across all properties for the given search term using the 'any' key.
3. Do not include any explanations or apologies in your responses.
4. Make sure to return id and names in all queries.
5. Retrieve only the information asked for in the query. Avoid retrieving extra info.
6. Implement pagination to show 20 records per page.
7. When generating Cypher queries for pagination:
- For the first page (if not specified otherwise), use `SKIP 0 LIMIT 20`.
- For subsequent pages, calculate the SKIP value based on the page number: SKIP = (page_number - 1) * 20
8. Maintain the same query structure and ordering across pagination requests.
9. Do not generate a count query unless specifically asked by the user.
10. If there are no more results to fetch, clearly indicate this in your response.
11. Always fetch 20 records
12. If the user requests all resources, carefully examine the previous chat to see which resources were already provided, then provide the next page instead of starting from page 1</instructions>
<cypher_techniques>How to write a paginated query:
A. Use SKIP and LIMIT clauses for pagination after analyzing chat history carefully:
<example>
MATCH (n:Node)
WHERE ...
RETURN n
SKIP {{skip_value}} LIMIT 20
</example>
B. For fuzzy matching across all properties:
<example>
WHERE ANY(x IN [prop IN n WHERE toString(prop) =~ '(?i).*searchTerm.*'] WHERE x)
</example>
C. For Levenshtein distance similarity:
<example>
WHERE ANY(x IN [prop IN n WHERE toString(prop)] WHERE apoc.text.levenshteinSimilarity(x, 'searchTerm') > 0.8)
</example>
D. When using shortestPath with pagination:
<example>
MATCH p=shortestPath((n1:Node1)-[*]-(n2:Node2))
WHERE ...
WITH n1, n2, p
SKIP {{skip_value}} LIMIT 20
RETURN n1 {{.id, .prop1, .prop2}} as startNode,
n2 {{.id, .prop1, .prop2}} as endNode,
[node IN nodes(p) | node {{.*}}] as pathNodes,
[rel IN relationships(p) | type(rel)] as pathRels
</example>
</cypher_techniques>
<database_schema>
{schema}
</database_schema>
<chat_history>
{chat_history}
</chat_history>
Strictly provide output in the following format:
Nodes: [Identify the nodes required for the query]
Relationships: [Identify relationships between relevant nodes based on the schema]
Previous_page: [As a database expert, carefully analyze from the chat history what was already fetched from the database]
No of records Shown to user in last step: [How many of the fetched records were shown to the user]
Thought: [As a database expert, think about how to answer the user's question after carefully analyzing the chat history]
Pagination_Info: [Specify the page number you are going to fetch, items per page should be 20]
Cypher: [Generate the Cypher query with appropriate pagination clauses based on pagination info in last step]"""
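# Note (descriptive, added for clarity): SYSTEM is rendered with str.format using
# `schema`, `success_queries`, and `clause`, while PAGINATED_SYSTEM expects
# `schema` and `chat_history` (see the Neo4j client). Literal braces that must
# survive formatting, e.g. {{skip_value}} and the RETURN map examples, are
# escaped by doubling them.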
|
CloudWhisperCustomBot | app/whisper/utils/neo4j/queries.py | node_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "node"
AND label CONTAINS '{user_id}'
AND NOT property CONTAINS "discovered_at"
WITH label AS nodeLabels, collect(property) AS properties
RETURN {{labels: nodeLabels, properties: properties}} AS output
"""
classic_node_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP"
AND elementType = "node"
AND label CONTAINS "Classic"
AND label CONTAINS '{user_id}'
AND NOT property CONTAINS "discovered_at"
WITH label AS nodeLabels, collect(property) AS properties
RETURN {{labels: nodeLabels, properties: properties}} AS output
"""
vpc_node_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP"
AND elementType = "node"
AND NOT label CONTAINS "Classic"
AND label CONTAINS '{user_id}'
AND NOT property CONTAINS "discovered_at"
WITH label AS nodeLabels, collect(property) AS properties
RETURN {{labels: nodeLabels, properties: properties}} AS output
"""
label_query = """
CALL db.labels() YIELD label
WHERE label CONTAINS '{user_id}'
RETURN label;
"""
classic_label_query = """
CALL db.labels() YIELD label
WHERE label CONTAINS '{user_id}'
AND label CONTAINS "Classic"
RETURN label;
"""
vpc_label_query = """
CALL db.labels() YIELD label
WHERE label CONTAINS '{user_id}' AND NOT (label CONTAINS "Classic")
RETURN label;
"""
node_names_query = """
MATCH (n) RETURN n.name
"""
rel_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "relationship"
AND label CONTAINS '{user_id}'
AND NOT property CONTAINS "discovered_at"
WITH label AS nodeLabels, collect(property) AS properties
RETURN {{type: nodeLabels, properties: properties}} AS output
"""
rel_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE type = "RELATIONSHIP"
AND elementType = "node"
AND label CONTAINS '{user_id}'
AND NOT property CONTAINS "discovered_at"
RETURN {{source: label, relationship: property, target: other}} AS output
"""
classic_rel_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE type = "RELATIONSHIP" AND elementType = "node"
AND label CONTAINS "Classic"
AND label CONTAINS '{user_id}'
AND NOT property CONTAINS "discovered_at"
RETURN {{source: label, relationship: property, target: other}} AS output
"""
vpc_rel_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE type = "RELATIONSHIP" AND elementType = "node"
AND NOT label CONTAINS "Classic"
AND label CONTAINS '{user_id}'
AND NOT property CONTAINS "discovered_at"
RETURN {{source: label, relationship: property, target: other}} AS output
"""
|
CloudWhisperCustomBot | app/whisper/utils/neo4j/client.py | from loguru import logger
from app.whisper.consts import WHISPER_USER_ROLE, WHISPER_ASSISTANT_ROLE
from app.whisper.llms.anthropic import AnthropicLLM
from app.whisper.utils.neo4j.prompt import SYSTEM, PAGINATED_SYSTEM
from app.whisper.utils.neo4j.queries import rel_query, classic_rel_query, rel_properties_query, node_properties_query, \
classic_node_properties_query, label_query, classic_label_query, vpc_label_query, vpc_node_properties_query, \
vpc_rel_query
from app.worker.cypher_store import qdrant, qdrant_retrieval
class Neo4j:
def __init__(self, db_session, user_id, llm_client=None, vpc=False, classic=False, cloud_id=None, schema=''):
self.db_session = db_session
self.user_id = user_id
self.cloud_id = cloud_id
self.schema = self.generate_schema(user_id, vpc=vpc, migration=classic) if not schema else schema
self.llm_client = llm_client if llm_client else AnthropicLLM()
self.retry = 0
self.labels = self.get_labels()
def get_labels(self, migration=False, vpc=False):
if migration:
labels = self.query_database(classic_label_query.format(user_id=self.user_id))
if labels is None:
logger.warning("No labels found for migration query.")
return []
elif vpc:
labels = self.query_database(vpc_label_query.format(user_id=self.user_id))
if labels is None:
logger.warning("No labels found for vpc query.")
return []
else:
labels = self.query_database(label_query.format(user_id=self.user_id))
if labels is None:
logger.warning("No labels found for default query.")
return []
labels_response = list({label[0].replace(f'_{self.user_id}', '') for label in labels})
return labels_response
def similarity_search_code(self, query: str, k):
try:
content_list = []
nodes = qdrant_retrieval(query, k)
for node in nodes:
content_list.append({'score': node.score, 'cypher': node.metadata['cypher'], 'query': node.text})
except Exception as ex:
logger.error(f"{ex}")
return []
return content_list
def generate_schema(self, user_id, migration=False, vpc=False):
if migration:
node_props = self.query_database(classic_node_properties_query.format(user_id=user_id)) or []
rel_props = self.query_database(rel_properties_query.format(user_id=user_id)) or []
rels = self.query_database(classic_rel_query.format(user_id=user_id)) or []
elif vpc:
node_props = self.query_database(vpc_node_properties_query.format(user_id=user_id)) or []
node_props = [node for node in node_props if
not (len(node) > 0 and 'labels' in node[0] and node[0]['labels'] == f'IBMCloud_{user_id}')]
logger.info(node_props)
rel_props = self.query_database(rel_properties_query.format(user_id=user_id)) or []
rels = self.query_database(vpc_rel_query.format(user_id=user_id)) or []
rels = [rel for rel in rels if not (
len(rel) > 0 and
('source' in rel[0] and rel[0]['source'] == f'IBMCloud_{user_id}') or
('target' in rel[0] and f'IBMCloud_{user_id}' in rel[0]['target'])
)]
else:
node_props = self.query_database(node_properties_query.format(user_id=user_id))
rel_props = self.query_database(rel_properties_query.format(user_id=user_id))
rels = self.query_database(rel_query.format(user_id=user_id))
schema = f"""Node properties are the following:\n{node_props}\n\n
Relationship properties are the following:\n{rel_props}\n\n
Relationship point from source to target nodes\n{rels}\n\n
Make sure to respect relationship types and directions"""
schema = schema.replace(f'_{user_id}', '')
schema = schema.replace("user_id", '')
schema = schema.replace('\'\'', '')
return schema
def refresh_schema(self):
self.schema = self.generate_schema(user_id=self.user_id)
def add_user_id(self, cypher):
import re
for label in self.labels:
if f'{label}_{self.user_id}' in cypher:
continue
pattern = f':{label}\\b'
replacement = f':{label}_{self.user_id}'
cypher = re.sub(pattern, replacement, cypher)
logger.info('<<<<add user_id in cypher query>>>>>')
logger.info(cypher)
return cypher
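# Note (descriptive, added for clarity): add_user_id rewrites every known label in
# the generated Cypher so it is scoped to the current user's graph, e.g. with
# labels ['VPC'] and user_id 'u42', "MATCH (n:VPC) RETURN n.name" becomes
# "MATCH (n:VPC_u42) RETURN n.name".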
def query_database(self, neo4j_query, params={}):
logger.info(f"Querying DATABASE WITH CYPHER: {neo4j_query}")
if '()' not in neo4j_query:
logger.info(f"ADDING USER_ID TO {neo4j_query}")
neo4j_query = self.add_user_id(neo4j_query)
logger.info(f"RUNNING QUERY -> {neo4j_query}")
result = self.db_session.run(neo4j_query, params)
output = [r.values() for r in result]
if output:
output.insert(0, result.keys())
return output
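# Note (descriptive, added for clarity): query_database returns a list whose first
# element is the column keys and whose remaining elements are the value tuples of
# each matched record; when nothing matches, the empty list stays falsy so callers
# can trigger a retry.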
def generate_cypher(self, question, successful_queries, pagination=False, chat_history=''):
self.llm_client.add_message(WHISPER_USER_ROLE, question)
self.llm_client.add_message(WHISPER_ASSISTANT_ROLE, 'Thought:')
if self.cloud_id:
clause = f"10. Add a where clause where 'cloud_id' property is '{self.cloud_id}'. Properly analyze the cloud id. Don't hallucinate on any other id"
if pagination:
system = [
{
"type": "text",
"text": PAGINATED_SYSTEM.format(schema=self.schema, success_queries=successful_queries.strip(),
clause=clause, chat_history=chat_history),
"cache_control": {"type": "ephemeral"}
}
]
response = self.llm_client.process(system=system, prompt_caching=True)
logger.info(
PAGINATED_SYSTEM.format(schema=self.schema, chat_history=chat_history))
else:
system = [
{
"type": "text",
"text": SYSTEM.format(schema=self.schema, success_queries=successful_queries.strip(),
clause=clause),
"cache_control": {"type": "ephemeral"}
}
]
response = self.llm_client.process(system=system, prompt_caching=True)
logger.info(
SYSTEM.format(schema=self.schema, success_queries=successful_queries.strip(), clause=clause))
else:
if pagination:
system = [
{
"type": "text",
"text": PAGINATED_SYSTEM.format(schema=self.schema, success_queries=successful_queries.strip(),
clause="", chat_history=chat_history),
"cache_control": {"type": "ephemeral"}
}
]
response = self.llm_client.process(system=system, prompt_caching=True)
logger.info(PAGINATED_SYSTEM.format(schema=self.schema, chat_history=chat_history))
else:
system = [
{
"type": "text",
"text": SYSTEM.format(schema=self.schema, success_queries=successful_queries.strip(),
clause="", chat_history=chat_history),
"cache_control": {"type": "ephemeral"}
}
]
response = self.llm_client.process(system=system, prompt_caching=True)
logger.info(SYSTEM.format(schema=self.schema, success_queries=successful_queries.strip(), clause=""))
logger.info(response)
self.llm_client.messages.pop()
self.llm_client.add_message(WHISPER_ASSISTANT_ROLE, f'Thought: {response}')
if '->' in response:
response = response.replace('->', '-')
if 'namespaces' in response:
response = response.replace('namespaces', 'workloads')
if '<-' in response:
response = response.replace('<-', '-')
if "```cypher" in response:
response = response.replace("```cypher", "")
if "```" in response:
response = response.replace("```", "")
cypher = ''
enhanced_cypher = ''
if 'Thought' in response or 'Cypher:' in response:
if 'Enhanced Cypher:' in response:
enhanced_cypher = response.split('Enhanced Cypher:')[1]
if self.cloud_id and self.cloud_id not in enhanced_cypher:
enhanced_cypher = add_id_to_return_clause(enhanced_cypher)
enhanced_cypher = add_limit_clause(enhanced_cypher)
response = response.split('Enhanced Cypher:')[0]
logger.info(f"Enhanced Cypher: {enhanced_cypher}")
if 'Cypher:' in response:
cypher = response.split('Cypher:')[1] if 'Cypher:' in response else response
if self.cloud_id and self.cloud_id not in cypher:
cypher = add_id_to_return_clause(cypher)
cypher = add_limit_clause(cypher)
logger.info('<<<<<printing cypher>>>>')
logger.info(cypher)
logger.info('<<<<<>>>>')
return cypher, enhanced_cypher
def run(self, question=None, cypher=None, chat_history='', pagination=False):
self.retry = 0
self.llm_client.messages = []
logger.info(f"Reset: messages cleared {self.llm_client.messages} and retry set to {self.retry}")
result, cypher = self._run(question=question, cypher=cypher, chat_history=chat_history, pagination=pagination)
return result, cypher
def _run(self, question=None, cypher=None, chat_history='', pagination=False):
logger.info(f"QUESTION-->{question} Cloud ID {self.cloud_id}")
if self.retry > 5:
return 'empty', 'empty'
try:
if cypher:
query_response = self.query_database(cypher)
logger.info("<<<<<<<response cypher>>>>>>")
logger.info(query_response)
return query_response, cypher
except Exception as e:
logger.info("failed to run count query")
return None, cypher
else:
# TODO successfull queries must be fetched and stored based on the cloud type selected from FE
# successful_queries = self.similarity_search_code(question, 3)
# successful_queries_str, use_llm, cypher = '', True, ''
# for successful_query in successful_queries:
# if successful_query['score'] >= 0.99:
# cypher = successful_query['cypher']
# use_llm = False
# break
# elif successful_query['score'] >= 0.8:
# successful_queries_str = f'{successful_queries_str}:\n{successful_query["cypher"]}' if (
# successful_queries_str) else f'{successful_query["cypher"]}'
use_llm = True
successful_queries_str = ''
if use_llm:
cypher, enhanced_cypher = self.generate_cypher(question=question,
successful_queries=successful_queries_str,
pagination=pagination, chat_history=chat_history)
if not (cypher or enhanced_cypher):
feedback = "Internal feedback: Please review your format and follow the output format according to \
the prompt so its easy to parse data"
self.retry = self.retry + 1
return self._run(question=feedback)
try:
query_response = ''
if cypher:
try:
if self.cloud_id and self.cloud_id not in cypher:
self.retry = self.retry + 1
feedback = f"Cloud id is missing in query \"{cypher}\" or you hallucinated and added wrong cloud_id. Please re-review and fix it"
return self._run(question=feedback)
query_response = self.query_database(cypher)
except Exception as e:
logger.info(f"<<<<<<Exception {str(e)} occurred>>>>>>")
if enhanced_cypher:
cypher = enhanced_cypher
if self.cloud_id not in cypher:
self.retry = self.retry + 1
feedback = f"Cloud id is missing in query \"{cypher}\" or you hallucinated and added wrong cloud_id. Please re-review and fix it"
self.llm_client.add_message(role=WHISPER_ASSISTANT_ROLE, content='Nodes:')
return self._run(question=feedback)
query_response = self.query_database(cypher)
logger.info("<<<<<<response cypher of enhance>>>>>>")
logger.info(query_response)
else:
raise Exception(e)
logger.info("<<<<<<<response cypher>>>>>>")
logger.info(query_response)
if (not query_response) and enhanced_cypher:
cypher = enhanced_cypher
logger.info(enhanced_cypher)
if self.cloud_id not in cypher:
self.retry = self.retry + 1
feedback = f"Cloud id is missing in query \"{cypher}\" or you hallucinated and added wrong cloud_id. Please re-review and fix it."
self.llm_client.add_message(role=WHISPER_ASSISTANT_ROLE, content='Nodes:')
return self._run(question=feedback)
query_response = self.query_database(cypher)
logger.info("<<<<<<response cypher of enhance>>>>>>")
logger.info(query_response)
if not query_response: # Ask llm to retry
self.retry = self.retry + 1
logger.info('<<<<<<into retrying>>>>>')
feedback = """\
This query returned empty response. First verify relationship as you may have made a query with wrong \
relationship between nodes. If relationship is correct then add \
following things to query \n\n -levenshteinSimilarity \n -search through all properties.\n\n. \
If required you can rethink as well. Don't generate any explanation"""
self.llm_client.add_message(role=WHISPER_ASSISTANT_ROLE, content='Nodes:')
return self._run(question=feedback)
if not query_response:
query_response = 'empty'
except Exception as e:
feedback = f"""This query returns an error: {str(e)}
Give me a improved query that works without any explanations or apologies"""
logger.info(feedback)
self.retry = self.retry + 1
return self._run(question=feedback)
else:
query_response = self.query_database(cypher)
is_internal_feedback = isinstance(question, str) and (
question.startswith('Internal feedback: Please') or
question.startswith('This query returned empty response. First'))
if query_response != 'empty' and not is_internal_feedback:
try:
qdrant.delay(str(question), str(cypher))
except Exception as e:
logger.info(f'<<There was an error in saving the successful cypher query>> {e}')
return query_response, cypher
def add_id_to_return_clause(cypher_query):
import re
# Regular expression to find the MATCH clause and variable name
match_pattern = re.compile(r'(MATCH\s*\((\w+):\w+\))', re.IGNORECASE)
# Regular expression to find the RETURN clause and capture subsequent SKIP or LIMIT
return_pattern = re.compile(r'(\bRETURN\b\s+[^\bSKIP\b\s]*)(\s*\bSKIP\b.*|\s*\bLIMIT\b.*|$)', re.IGNORECASE)
# Find the MATCH clause and variable name
match_variable = match_pattern.search(cypher_query)
if not match_variable:
# If no MATCH clause is found, return the query unchanged
return cypher_query
variable_name = match_variable.group(2)
# Find the RETURN clause in the query
return_match = return_pattern.search(cypher_query)
if return_match:
# Get the current RETURN fields
current_return_fields = return_match.group(1).strip()
# Check if '{variable_name}.id' is already present in the RETURN fields
if f'{variable_name}.id' not in current_return_fields:
# Add '{variable_name}.id' to the RETURN clause
new_return_fields = f"{current_return_fields}, {variable_name}.id"
# Preserve SKIP or LIMIT clauses, if present
rest_of_query = return_match.group(2)
# Reconstruct the query; new_return_fields already contains the RETURN keyword
cypher_query = f"{cypher_query[:return_match.start()]}{new_return_fields}{rest_of_query}".strip()
return cypher_query
def add_limit_clause(cypher_query):
"""
Appends 'LIMIT 20' to the Cypher query if the query does not already contain a LIMIT clause.
Args:
cypher_query (str): The original Cypher query.
Returns:
str: The modified Cypher query with 'LIMIT 20' appended if needed.
"""
# Check if the query already contains a LIMIT clause
if "LIMIT" not in cypher_query.upper():
# Append LIMIT 20 to the query
cypher_query = cypher_query.strip() + " LIMIT 20"
return cypher_query
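# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of the two post-processing helpers above; the query text is a
# made-up example:
if __name__ == "__main__":  # pragma: no cover - example only
    q = "MATCH (v:VPC) WHERE v.cloud_id = 'abc' RETURN v.name"
    q = add_id_to_return_clause(q)  # appends ", v.id" to the RETURN clause
    q = add_limit_clause(q)         # appends " LIMIT 20" when no LIMIT is present
    print(q)
    # MATCH (v:VPC) WHERE v.cloud_id = 'abc' RETURN v.name, v.id LIMIT 20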
|
CloudWhisperCustomBot | app/whisper/utils/openapi/spec.py | """Utility functions for parsing an OpenAPI spec."""
from __future__ import annotations
import copy
import json
import logging
import re
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, TYPE_CHECKING, Union
import requests
import yaml
from langchain_core.pydantic_v1 import ValidationError
logger = logging.getLogger(__name__)
class HTTPVerb(str, Enum):
"""Enumerator of the HTTP verbs."""
GET = "get"
PUT = "put"
POST = "post"
DELETE = "delete"
OPTIONS = "options"
HEAD = "head"
PATCH = "patch"
TRACE = "trace"
@classmethod
def from_str(cls, verb: str) -> HTTPVerb:
"""Parse an HTTP verb."""
try:
return cls(verb)
except ValueError:
raise ValueError(f"Invalid HTTP verb. Valid values are {cls.__members__}")
if TYPE_CHECKING:
from openapi_pydantic import (
Components,
Operation,
Parameter,
PathItem,
Paths,
Reference,
RequestBody,
Schema,
)
try:
from openapi_pydantic import OpenAPI
except ImportError:
OpenAPI = object # type: ignore
class OpenAPISpec(OpenAPI):
"""OpenAPI Modec."""
openapi: str = "3.1.0" # overriding overly restrictive type from parent class
@property
def _paths_strict(self) -> Paths:
if not self.paths:
raise ValueError("No paths found in spec")
return self.paths
def _get_path_strict(self, path: str) -> PathItem:
path_item = self._paths_strict.get(path)
if not path_item:
raise ValueError(f"No path found for {path}")
return path_item
@property
def _components_strict(self) -> Components:
"""Get components or err."""
if self.components is None:
raise ValueError("No components found in spec. ")
return self.components
@property
def _parameters_strict(self) -> Dict[str, Union[Parameter, Reference]]:
"""Get parameters or err."""
parameters = self._components_strict.parameters
if parameters is None:
raise ValueError("No parameters found in spec. ")
return parameters
@property
def _schemas_strict(self) -> Dict[str, Schema]:
"""Get the dictionary of schemas or err."""
schemas = self._components_strict.schemas
if schemas is None:
raise ValueError("No schemas found in spec. ")
return schemas
@property
def _request_bodies_strict(self) -> Dict[str, Union[RequestBody, Reference]]:
"""Get the request body or err."""
request_bodies = self._components_strict.requestBodies
if request_bodies is None:
raise ValueError("No request body found in spec. ")
return request_bodies
def _get_referenced_parameter(self, ref: Reference) -> Union[Parameter, Reference]:
"""Get a parameter (or nested reference) or err."""
ref_name = ref.ref.split("/")[-1]
parameters = self._parameters_strict
if ref_name not in parameters:
raise ValueError(f"No parameter found for {ref_name}")
return parameters[ref_name]
def _get_root_referenced_parameter(self, ref: Reference) -> Parameter:
"""Get the root reference or err."""
from openapi_pydantic import Reference
parameter = self._get_referenced_parameter(ref)
while isinstance(parameter, Reference):
parameter = self._get_referenced_parameter(parameter)
return parameter
def get_referenced_schema(self, ref: Reference) -> Schema:
"""Get a schema (or nested reference) or err."""
ref_name = ref.ref.split("/")[-1]
schemas = self._schemas_strict
if ref_name not in schemas:
raise ValueError(f"No schema found for {ref_name}")
return schemas[ref_name]
def get_schema(self, schema: Union[Reference, Schema]) -> Schema:
from openapi_pydantic import Reference
if isinstance(schema, Reference):
return self._get_root_referenced_schema(schema)
return schema
def _get_root_referenced_schema(self, ref: Reference) -> Schema:
"""Get the root reference or err."""
ref_name = ref.ref.split("/")[-1]
schemas = self._schemas_strict
if ref_name not in schemas:
raise ValueError(f"No schema found for {ref_name}")
schema = schemas[ref_name]
for property_name, property_object in schema.properties.items():
if hasattr(property_object, "ref"):
sub_schema = self._get_root_referenced_schema(property_object)
schema.properties[property_name] = sub_schema
if hasattr(property_object, "allOf") and property_object.allOf is not None:
for sub_property in property_object.allOf:
all_of_sub_schema = self._get_root_referenced_schema(sub_property)
schema.properties[property_name] = all_of_sub_schema
return schema
def _get_referenced_request_body(
self, ref: Reference
) -> Optional[Union[Reference, RequestBody]]:
"""Get a request body (or nested reference) or err."""
ref_name = ref.ref.split("/")[-1]
request_bodies = self._request_bodies_strict
if ref_name not in request_bodies:
raise ValueError(f"No request body found for {ref_name}")
return request_bodies[ref_name]
def _get_root_referenced_request_body(
self, ref: Reference
) -> Optional[RequestBody]:
"""Get the root request Body or err."""
from openapi_pydantic import Reference
request_body = self._get_referenced_request_body(ref)
while isinstance(request_body, Reference):
request_body = self._get_referenced_request_body(request_body)
return request_body
@staticmethod
def _alert_unsupported_spec(obj: dict) -> None:
"""Alert if the spec is not supported."""
warning_message = (
" This may result in degraded performance."
+ " Convert your OpenAPI spec to 3.1.* spec"
+ " for better support."
)
swagger_version = obj.get("swagger")
openapi_version = obj.get("openapi")
if isinstance(openapi_version, str):
if openapi_version != "3.1.0":
logger.warning(
f"Attempting to load an OpenAPI {openapi_version}"
f" spec. {warning_message}"
)
else:
pass
elif isinstance(swagger_version, str):
logger.warning(
f"Attempting to load a Swagger {swagger_version}"
f" spec. {warning_message}"
)
else:
raise ValueError(
"Attempting to load an unsupported spec:"
f"\n\n{obj}\n{warning_message}"
)
@classmethod
def parse_obj(cls, obj: dict) -> OpenAPISpec:
try:
cls._alert_unsupported_spec(obj)
return super().parse_obj(obj)
except ValidationError as e:
# We are handling possibly misconfigured specs and
# want to do a best-effort job to get a reasonable interface out of it.
new_obj = copy.deepcopy(obj)
for error in e.errors():
keys = error["loc"]
item = new_obj
for key in keys[:-1]:
item = item[key]
item.pop(keys[-1], None)
return cls.parse_obj(new_obj)
@classmethod
def from_spec_dict(cls, spec_dict: dict) -> OpenAPISpec:
"""Get an OpenAPI spec from a dict."""
return cls.parse_obj(spec_dict)
@classmethod
def from_text(cls, text: str, endpoint=None) -> OpenAPISpec:
"""Get an OpenAPI spec from a text and cleanup the added n(flags) from the endpoints"""
try:
spec_dict = json.loads(text)
except json.JSONDecodeError:
spec_dict = yaml.safe_load(text)
if endpoint:
flags = [str(i) for i in range(10)]
spec_dict['paths'] = {endpoint: spec_dict['paths'][endpoint]}
for i in flags:
duplicate = True if i in endpoint[-1] else False
if duplicate:
data = spec_dict['paths'][endpoint]
spec_dict['paths'] = {endpoint[:-1]: data}
return cls.from_spec_dict(spec_dict)
@classmethod
def from_file(cls, path: Union[str, Path], endpoint=None) -> OpenAPISpec:
"""Get an OpenAPI spec from a file path."""
path_ = path if isinstance(path, Path) else Path(path)
if not path_.exists():
raise FileNotFoundError(f"{path} does not exist")
with path_.open("r") as f:
return cls.from_text(f.read(), endpoint)
@classmethod
def from_url(cls, url: str) -> OpenAPISpec:
"""Get an OpenAPI spec from a URL."""
response = requests.get(url)
return cls.from_text(response.text)
@property
def base_url(self) -> str:
"""Get the base url."""
return self.servers[0].url
def get_methods_for_path(self, path: str) -> List[str]:
"""Return a list of valid methods for the specified path."""
from openapi_pydantic import Operation
path_item = self._get_path_strict(path)
results = []
for method in HTTPVerb:
operation = getattr(path_item, method.value, None)
if isinstance(operation, Operation):
results.append(method.value)
return results
def get_parameters_for_path(self, path: str) -> List[Parameter]:
from openapi_pydantic import Reference
path_item = self._get_path_strict(path)
parameters = []
if not path_item.parameters:
return []
for parameter in path_item.parameters:
if isinstance(parameter, Reference):
parameter = self._get_root_referenced_parameter(parameter)
parameters.append(parameter)
return parameters
def get_operation(self, path: str, method: str) -> Operation:
"""Get the operation object for a given path and HTTP method."""
from openapi_pydantic import Operation
path_item = self._get_path_strict(path)
operation_obj = getattr(path_item, method, None)
if not isinstance(operation_obj, Operation):
raise ValueError(f"No {method} method found for {path}")
return operation_obj
def get_parameters_for_operation(self, operation: Operation) -> List[Parameter]:
"""Get the components for a given operation."""
from openapi_pydantic import Reference
parameters = []
if operation.parameters:
for parameter in operation.parameters:
if isinstance(parameter, Reference):
parameter = self._get_root_referenced_parameter(parameter)
parameters.append(parameter)
return parameters
def get_request_body_for_operation(
self, operation: Operation
) -> Optional[RequestBody]:
"""Get the request body for a given operation."""
from openapi_pydantic import Reference
request_body = operation.requestBody
if isinstance(request_body, Reference):
request_body = self._get_root_referenced_request_body(request_body)
return request_body
@staticmethod
def get_cleaned_operation_id(operation: Operation, path: str, method: str) -> str:
"""Get a cleaned operation id from an operation id."""
operation_id = operation.operationId
if operation_id is None:
# Replace all punctuation of any kind with underscore
path = re.sub(r"[^a-zA-Z0-9]", "_", path.lstrip("/"))
operation_id = f"{path}_{method}"
return operation_id.replace("-", "_").replace(".", "_").replace("/", "_")
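# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch, assuming the DRaaS spec file referenced elsewhere in this
# repo is present at runtime:
if __name__ == "__main__":  # pragma: no cover - example only
    spec = OpenAPISpec.from_file(
        "app/whisper/flow/phases/action/specs/ibm_draas_backup_openapi_spec.json"
    )
    for spec_path in (spec.paths or {}):
        print(spec_path, spec.get_methods_for_path(spec_path))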
|
CloudWhisperCustomBot | app/whisper/utils/openapi/conversion_utils.py | from __future__ import annotations
import json
import re
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple, TYPE_CHECKING
import requests
from langchain_community.tools.openapi.utils.api_models import APIOperation
from app.whisper.utils.openapi.spec import OpenAPISpec
if TYPE_CHECKING:
from openapi_pydantic import Parameter
def _get_description(o: Any, prefer_short: bool) -> Optional[str]:
summary = getattr(o, "summary", None)
description = getattr(o, "description", None)
if prefer_short:
return summary or description
return description or summary
def _format_url(url: str, path_params: dict) -> str:
expected_path_param = re.findall(r"{(.*?)}", url)
new_params = {}
for param in expected_path_param:
clean_param = param.lstrip(".;").rstrip("*")
val = path_params[clean_param]
if isinstance(val, list):
if param[0] == ".":
sep = "." if param[-1] == "*" else ","
new_val = "." + sep.join(val)
elif param[0] == ";":
sep = f"{clean_param}=" if param[-1] == "*" else ","
new_val = f"{clean_param}=" + sep.join(val)
else:
new_val = ",".join(val)
elif isinstance(val, dict):
kv_sep = "=" if param[-1] == "*" else ","
kv_strs = [kv_sep.join((k, v)) for k, v in val.items()]
if param[0] == ".":
sep = "."
new_val = "."
elif param[0] == ";":
sep = ";"
new_val = ";"
else:
sep = ","
new_val = ""
new_val += sep.join(kv_strs)
else:
if param[0] == ".":
new_val = f".{val}"
elif param[0] == ";":
new_val = f";{clean_param}={val}"
else:
new_val = val
new_params[param] = new_val
return url.format(**new_params)
def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec) -> dict:
properties = {}
required = []
for p in params:
if p.param_schema:
schema = spec.get_schema(p.param_schema)
else:
media_type_schema = list(p.content.values())[0].media_type_schema # type: ignore # noqa: E501
schema = spec.get_schema(media_type_schema)
if p.description and not schema.description:
schema.description = p.description
properties[p.name] = json.loads(schema.json(exclude_none=True))
if p.required:
required.append(p.name)
return {"type": "object", "properties": properties, "required": required}
def openapi_spec_to_openai_fn(
spec: OpenAPISpec,
) -> Tuple[List[Dict[str, Any]], Callable]:
"""Convert a valid OpenAPI spec to the JSON Schema format expected for OpenAI
functions.
Args:
spec: OpenAPI spec to convert.
Returns:
Tuple of the OpenAI functions JSON schema and a default function for executing
a request based on the OpenAI function schema.
"""
if not spec.paths:
return [], lambda: None
functions = []
_name_to_call_map = {}
for path in spec.paths:
spec_dummy = OpenAPISpec.from_file("app/whisper/flow/phases/action/specs/ibm_draas_backup_openapi_spec.json",
endpoint=path)
# spec_dummy = OpenAPISpec.from_file("ibm_draas_backup_openapi_spec.json", endpoint=path)
flags = [str(i) for i in range(10)]
for i in flags:
duplicate = True if i in path[-1] else False
if duplicate:
path = path[:-1]
path_params = {
(p.name, p.param_in): p for p in spec_dummy.get_parameters_for_path(path)
}
for method in spec_dummy.get_methods_for_path(path):
request_args = {}
op = spec_dummy.get_operation(path, method)
op_params = path_params.copy()
for param in spec_dummy.get_parameters_for_operation(op):
op_params[(param.name, param.param_in)] = param
params_by_type = defaultdict(list)
for name_loc, p in op_params.items():
params_by_type[name_loc[1]].append(p)
param_loc_to_arg_name = {
"query": "params",
"header": "headers",
"cookie": "cookies",
"path": "path_params",
}
for param_loc, arg_name in param_loc_to_arg_name.items():
if params_by_type[param_loc]:
request_args[arg_name] = _openapi_params_to_json_schema(
params_by_type[param_loc], spec_dummy
)
request_body = spec_dummy.get_request_body_for_operation(op)
# TODO: Support more MIME types.
if request_body and request_body.content:
media_types = {}
for media_type, media_type_object in request_body.content.items():
if media_type_object.media_type_schema:
schema = spec_dummy.get_schema(media_type_object.media_type_schema)
media_types[media_type] = json.loads(
schema.json(exclude_none=True)
)
if len(media_types) == 1:
media_type, schema_dict = list(media_types.items())[0]
key = "json" if media_type == "application/json" else "data"
request_args[key] = schema_dict
elif len(media_types) > 1:
request_args["data"] = {"anyOf": list(media_types.values())}
api_op = APIOperation.from_openapi_spec(spec_dummy, path, method)
fn = {
"name": api_op.operation_id,
"description": api_op.description,
"parameters": {
"type": "object",
"properties": request_args,
},
}
functions.append(fn)
_name_to_call_map[fn["name"]] = {
"method": method,
"url": api_op.base_url + api_op.path,
}
def default_call_api(
name: str,
fn_args: dict,
headers: Optional[dict] = None,
params: Optional[dict] = None,
**kwargs: Any,
) -> Any:
method = _name_to_call_map[name]["method"]
url = _name_to_call_map[name]["url"]
path_params = fn_args.pop("path_params", {})
url = _format_url(url, path_params)
if "data" in fn_args and isinstance(fn_args["data"], dict):
fn_args["data"] = json.dumps(fn_args["data"])
_kwargs = {**fn_args, **kwargs}
if headers is not None:
if "headers" in _kwargs:
_kwargs["headers"].update(headers)
else:
_kwargs["headers"] = headers
if params is not None:
if "params" in _kwargs:
_kwargs["params"].update(params)
else:
_kwargs["params"] = params
return requests.request(method, url, **_kwargs)
return functions, default_call_api
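# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of the path-parameter expansion done by _format_url; the URL
# and parameter names are made-up examples:
if __name__ == "__main__":  # pragma: no cover - example only
    example_url = "https://api.example.com/v1/backups/{backup_id}/restores/{restore_id}"
    print(_format_url(example_url, {"backup_id": "bkp-123", "restore_id": "rst-456"}))
    # -> https://api.example.com/v1/backups/bkp-123/restores/rst-456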
|
CloudWhisperCustomBot | app/whisper/utils/information_retrieval_engine/prompt.py | SYSTEM = """You are a team member of the 'Cloud Whisperer' project by Wanclouds, an expert Cloud Support Engineer specializing in cloud backups, disaster recovery, and migrations. Your expertise covers major public clouds (IBM Cloud, AWS, Google Cloud, Microsoft Azure) and Wanclouds' offerings. You assist potential customers with insights and queries, perform backup-related actions, and provide guidance on backup, disaster recovery, and migration setups across these platforms.
Your primary responsibility is to retrieve and analyze data from our product database for user queries. The idea is to gather all the information from the database first and only ask the end user when you have a problem finding a required piece of input in the database. You are provided with the following database tools, i.e. vpc_db_tool, classic_db_tool, ibm_resources_that_have_no_backup_db_tool and pagination_db_tool, to fetch information that is not already available in the chat history. Maintain a professional demeanor throughout the interaction and confirm any unclear details with the user. Do not engage with or respond to abusive language.
<tool_use>
You are provided with the following database tools:
a) classic_db_tool: To fetch classic cloud resource information
b) vpc_db_tool: To fetch IBM cloud resource information
c) pagination_db_tool: To paginate the data retrieved from the database
{cost_tool_opt}
{ibm_resources_that_have_no_backup_db_tool}
These tools (classic_db_tool, vpc_db_tool and ibm_resources_that_have_no_backup_db_tool) query the database using different sets of Neo4j nodes. You must keep the following points in mind while deciding which tool to use:
1. Analyze the nodes mentioned in each tool's description. If the user query asks to show any resource that belongs to the nodes mentioned in a tool's description, then that tool must be called. If the user asks for more results, use the pagination_db_tool to fetch the next page of results.
2. The query generated by a tool must have a link to the nodes mentioned in its description.
3. If the user asks any information regarding classic cloud or any subset of the node name like virtual server, BareMetal, classic subnet, vlan etc., then classic_db_tool must be used.
4. If the user asks anything related to IBM Cloud or VPC or IKS or VSI or COS bucket related information like VPC, instance, backup, subnet, region etc. then vpc_db_tool must be used. Additionally, for queries about idle resources, the vpc_db_tool should be utilized instead of the unbacked_resources_recommendation_db_tool.
5. Carefully analyze the context of user response and clarify if you are confused between resources by asking if its classic cloud resource or not.
6. All classic cloud resource information is handled by classic_db_tool whereas IBM Cloud resource information retrieval is handled by vpc_db_tool.
7. If the user asks for more results, use the pagination_db_tool to fetch the next page of results.
8. Take feedback from the user, verifying if this is the information he needs.
9. Break down large queries to sub queries.
{cost_tool_instructions}
{unbacked_resources_recommendation_db_tool_instructions}
</tool_use>
<general-instructions>
1. Don’t populate values or make assumptions. Follow the instructions mentioned above in order to decide which tool to use.
2. Present all information to the user in a readable format. Use bullet points or numbered steps when presenting information or instructions, and format your answer properly.
3. To interact with the database tools, provide a natural language query. The database tools will convert the query to a database query and execute it against the database to retrieve the relevant information.
<example>{{query = "What are the names of all the companies"}}</example>
4. Break down complex requests into smaller, manageable steps
5. Use the database tools judiciously and only when necessary to avoid overloading the user with irrelevant information.
6. Analyze previous messages to understand the context and utilize any relevant information present in the conversation history.
7. If the user asks for more results, use the pagination_db_tool to fetch the next page of results.
8. Take a break after calling tool consecutively 3 times
9. Don't hallucinate if you get empty result of any query.
10. The database tools return both cypher query and result. From that you can develop an understanding what actually the selected database tool was able to find out against your query
11. Maintain a professional tone in your responses. Do not mention the database or database tools to the user, as they are not aware of the internal implementation. In case of an empty response, simply inform the user that you couldn't find any resource matching their query.
</general-instructions>
<enhanced-query-handling>
1. When encountering queries that involve multiple resource types or complex relationships, break them down into smaller, manageable sub-queries.
2. For queries involving specific attributes (e.g., region, tag), use generic terms in your natural language query to the database tool. For example, use "specific region" instead of a particular region name.
3. If the query results are empty or insufficient, consider broadening the search criteria or asking the user for more specific information.
4. When dealing with queries that involve relationships between different resource types (e.g., VirtualServerInstances, VPCs, and Volumes), structure your natural language query to the database tool to include these relationships.
5. If a query seems to require specific values (like region names or tag values), first attempt to retrieve a list of possible values from the database, then ask the user to specify if necessary.
6. For queries involving counts or aggregations across different resource types, consider breaking these down into separate queries for each resource type, then combining the results in your response to the user.
7. When encountering queries that might require parameter substitution (like $region or $tag), rephrase these as "specific region" or "particular tag" in your query to the database tool.
8. If a query involves time-based data or recent changes, consider how to phrase this in a way that the database tool can interpret (e.g., "created within the last 7 days" instead of specific dates).
9. For queries that might return a large number of results, always consider how to limit or paginate the results in your initial query to the database tool.
10. If a query involves complex filtering or sorting, break this down into steps: first retrieve the data, then apply filters or sorting in subsequent queries if necessary.
</enhanced-query-handling>
<pagination-guidelines>
1. For initial requests, use classic_db_tool or vpc_db_tool accordingly to fetch the first 20 items of a resource type.
2. When users ask for more, use db_tool_for_pagination to fetch the next 20 items.
3. Keep track of the total items shown for each resource type.
4. If the total reaches or exceeds 25, inform the user about the pagination limit.
5. Suggest that the user provide more specific criteria to narrow down the results.
</pagination-guidelines>
{cost_tool_tag}
<database-schema>
I'm providing the database schema to help you understand what data exists in the database, enabling you to intelligently retrieve information from it. Your job is to generate natural language queries for the database tool to retrieve the necessary information from the database.
The database has two types of schema:
Classic Cloud schema:
{classic_schema}
IBM Cloud schema:
{vpc_schema}
</database-schema>
Strictly follow the following format:
Hallucination: [Take a moment and do not hallucinate. Understand user query and database schema]
Thought: [Plan out next step by reviewing current situation, history, user response/query and schema. Analyze that how many items of this resource type have I already shown?]
Response: [craft response for the user in professional tone without mentioning anything related to internal functionality]"""
vpc_cost_tool_tag = """<cost-report-generation>
For cost report generation the tool vpc_cost_optimization_tool provides the json information regarding cost trends for previous 12 months and cost per service data of IBM cloud account.
Follow the following instructions for cost related queries.
1. Analyze the user intent and generate a response from the json data provided to you by the tool.
2. Parse the json data:
- Identify the column headers and their meanings
- Determine the time period covered by the data
- Identify any relevant categories or services mentioned in the data
3. Analyze the data based on overall spending trends and totals and break down spending by categories or services and compare spending across different time periods or categories
4. Generate insights from the data:
- Identify any significant patterns or trends
- Highlight any unusual spikes or dips in spending
- Calculate relevant statistics (e.g., average monthly spending, percentage changes)
5. Format your report:
- Begin with a brief introduction explaining the scope of the report
- Organize your findings into logical sections with clear headings
- Use bullet points or numbered lists for easy readability
- Include a summary or conclusion at the end
6. If appropriate for the report type, consider including recommendations:
- Suggest areas where costs could potentially be optimized
- Identify services or resources that may be underutilized
7. Use tables to present data wherever possible. This includes:
- Service cost breakdowns
- Cost trend summaries
</cost-report-generation>"""
classic_infra_assessments_tool_tag = """
<table-format-report>
For the Assessments and Recommendations Report generation the tool softlayer_infra_assessment_tool provides the json information regarding high-level assessments of IBM classic infrastructures.
Follow the following instructions for IBM SoftLayer Cloud resource assessment related queries.
1. Analyze the user intent and generate a response from the JSON data provided to you by the tool:
2. Resource Details:
For each resource (e.g., compute instances, virtual machines, network gateways, storage volumes, etc.), provide the following details in a clear, table format:
- Name: Identify the name of the resource.
- Location Specify the location where the resource is hosted (e.g., Region, Data Center, Zone).
- Resource Type: Describe the type of resource (e.g., Instance, Gateway, Load Balancer, Storage, etc.).
- Operating System (if applicable): Specify the operating system (if it is a virtual machine or instance).
- CPU Utilization: Provide average CPU usage across all cores.
- Memory Utilization: Display memory usage as a percentage of total available memory.
- Network Utilization:
- Inbound Traffic (GB): Amount of incoming data.
- Outbound Traffic (GB): Amount of outgoing data.
- Disk/Storage Usage (if applicable): Indicate disk or storage capacity usage as a percentage of the total available space.
- Cost Optimization Recommendations (if applicable): Provide actionable recommendations for optimizing costs (e.g., resizing, decommissioning, upgrading, etc.).
3. Summary of Recommendations:
Summarize the key recommendations based on the data analysis:
- Underutilized Resources: Identify resources with low utilization that could benefit from downsizing, right-sizing, or termination.
- Idle Resources: Highlight resources that are stopped, unused, or idle but continue to incur charges, and suggest actions (e.g., decommissioning or reconfiguring).
- Optimization Opportunities: Suggest optimizations for specific resources such as:
- Scaling: Opportunities for enabling auto-scaling to better match demand.
- Profile Adjustments: Recommending changes to instance profiles (e.g., switching to a different instance type).
- Removing Unused Resources: Identifying unused volumes, snapshots, or load balancers that can be safely removed.
4. General Optimization Suggestions:
Provide broader recommendations for cost-saving strategies and operational efficiency improvements across the entire infrastructure:
- Enable Auto-Scaling: Automatically scale resources up or down based on real-time demand to avoid over-provisioning.
- Review and Optimize Storage: Regularly review storage volumes for low utilization and consider resizing or deleting unused volumes.
- Leverage Spot Instances or Reserved Capacity: Use spot or reserved instances to save costs where appropriate.
- Implement Resource Tagging: Utilize resource tagging for better cost tracking, categorization, and management across different environments or teams.
- Monitor and Optimize Network Usage: Regularly assess network bandwidth usage and optimize traffic patterns to avoid unnecessary charges.
- Use Cloud Cost Management Tools: Implement automated cost management policies and tools to continuously identify further savings opportunities.
- Rightsize Instances: Regularly evaluate instances for under- or over-utilization and adjust them to better fit actual workloads.
- Turn off Unused Resources: Ensure unused or non-critical resources are decommissioned when not in use (e.g., development environments).
</table-format-report>
Important: All necessary data has already been provided, so please proceed with analyzing and generating the report without requesting further details.
"""
|
CloudWhisperCustomBot | app/whisper/utils/information_retrieval_engine/__init__.py | from app.whisper.utils.action_engine.base import ActionPhaseClaude
__all__ = ['ActionPhaseClaude'] |
CloudWhisperCustomBot | app/whisper/utils/information_retrieval_engine/unbacked_resource_queries.py |
def get_backup_queries(cloud_id, resource_type):
base_query_vpc = """
MATCH (v:VPC)
OPTIONAL MATCH (b:VPCBackup {name: v.name, cloud_id: v.cloud_id})
WITH v, b
WHERE b IS NULL
"""
base_query_iks = """
MATCH (v:KubernetesCluster)
OPTIONAL MATCH (b:IKSBackupDetails {name: v.name, cloud_id: v.cloud_id})
WITH v, b
WHERE b IS NULL
"""
base_query_cos = """
MATCH (v:COSBucket)
OPTIONAL MATCH (b:COSBucketBackupDetails {name: v.name, cloud_id: v.cloud_id})
WITH v, b
WHERE b IS NULL
"""
base_query_vsi = """
MATCH (v:VirtualServerInstance)
OPTIONAL MATCH (b:VirtualServerInstanceBackup {name: v.name, cloud_id: v.cloud_id})
WITH v, b
WHERE b IS NULL
"""
queries = {}
if "vpc" in resource_type.lower() or "virtual private cloud" in resource_type.lower():
queries["IBM VPC"] = base_query_vpc + (f" AND v.cloud_id = '{cloud_id}'" if cloud_id else "") + " RETURN count(v) AS count, collect(v.name)[0..20] AS names"
if "k8s" in resource_type.lower() or "cluster" in resource_type.lower():
queries["IBM IKS"] = base_query_iks + (f" AND v.cloud_id = '{cloud_id}'" if cloud_id else "") + " RETURN count(v) AS count, collect(v.name)[0..20] AS names"
if "cos" in resource_type.lower() or "bucket" in resource_type.lower():
queries["IBM COS"] = base_query_cos + (f" AND v.cloud_id = '{cloud_id}'" if cloud_id else "") + " RETURN count(v) AS count, collect(v.name)[0..20] AS names"
if "vsi" in resource_type.lower() or "instance" in resource_type.lower():
queries["IBM VSI"] = base_query_vsi + (f" AND v.cloud_id = '{cloud_id}'" if cloud_id else "") + " RETURN count(v) AS count, collect(v.name)[0..20] AS names"
if not queries:
all_queries = []
if cloud_id:
all_queries.append(base_query_vpc + f" AND v.cloud_id = '{cloud_id}' RETURN 'IBM VPC' AS resource_type, count(v) AS count, collect(v.name)[0..20] AS names")
all_queries.append(base_query_iks + f" AND v.cloud_id = '{cloud_id}' RETURN 'IBM K8S' AS resource_type, count(v) AS count, collect(v.name)[0..20] AS names")
all_queries.append(base_query_cos + f" AND v.cloud_id = '{cloud_id}' RETURN 'IBM COS' AS resource_type, count(v) AS count, collect(v.name)[0..20] AS names")
all_queries.append(base_query_vsi + f" AND v.cloud_id = '{cloud_id}' RETURN 'IBM VSI' AS resource_type, count(v) AS count, collect(v.name)[0..20] AS names")
else:
all_queries.append(base_query_vpc + " RETURN 'IBM VPC' AS resource_type, count(v) AS count, collect(v.name)[0..20] AS names")
all_queries.append(base_query_iks + " RETURN 'IBM K8S' AS resource_type, count(v) AS count, collect(v.name)[0..20] AS names")
all_queries.append(base_query_cos + " RETURN 'IBM COS' AS resource_type, count(v) AS count, collect(v.name)[0..20] AS names")
all_queries.append(base_query_vsi + " RETURN 'IBM VSI' AS resource_type, count(v) AS count, collect(v.name)[0..20] AS names")
combined_query = " UNION ALL ".join(all_queries)
output = {"Combined Query": combined_query}
return output
output = {}
for resource_type, query in queries.items():
output[resource_type] = query
if len(queries) > 1:
combined_queries = " UNION ALL ".join(output.values())
return {"Combined Query": combined_queries}
return output
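# Usage sketch (hedged, with a hypothetical cloud id): for a mixed resource_type such as
# "vpc and cos buckets", the per-service queries above are merged into a single statement
# joined with UNION ALL under the "Combined Query" key.
# example = get_backup_queries(cloud_id="my-cloud-id", resource_type="vpc and cos buckets")
# print(example["Combined Query"])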
|
CloudWhisperCustomBot | app/whisper/utils/information_retrieval_engine/base.py | import json
import re
from datetime import datetime, timedelta, timezone
from anthropic import APIConnectionError, RateLimitError, APIStatusError
from app.api_discovery.discovery_task import execute_paginated_api
from app.core.config import settings
from app.web.common import db_deps
from app.web.common.templates import VPC_COST_OPTIMIZATION_TEMPLATE, SOFTLAYER_INFRA_ASSESSMENT_PROMPT
from app.web.common.utils import get_softlayer_cloud_cost_response
from app.whisper.consts import WHISPER_USER_ROLE, WHISPER_ASSISTANT_ROLE
from app.whisper.llms.anthropic import AnthropicLLM
from app.whisper.utils.action_engine.consts import ibm_region_mapper
from app.whisper.utils.information_retrieval_engine.prompt import (SYSTEM, vpc_cost_tool_tag,
classic_infra_assessments_tool_tag)
from app.whisper.utils.information_retrieval_engine.unbacked_resource_queries import get_backup_queries
from app.whisper.utils.neo4j.client import Neo4j
from app.whisper.utils.pagination_utils import generate_count_query, calculate_resource_shown
from app.whisper.utils.prompt import DB_RESULT_FORMAT_PROMPT
from loguru import logger
from sqlalchemy import func
from sqlalchemy.future import select
class RetrievalPhaseClaude:
def __init__(self, chat_history: list, llm_chat_history, user_id: str, bearer: str, chat_id: str, cloud_id=None,
cloud_type=None):
self.base_llm = AnthropicLLM()
self.base_llm.messages = self.base_llm.format_chat_history(chat_history, update_messages=True)
self.user_id = user_id
self.bearer = bearer
self.chat_id = chat_id
self.classic_schema = None
self.vpc_schema = None
self.classic_labels = None
self.vpc_cloud_labels = None
self.cloud_id = cloud_id
self.cloud_type = cloud_type
self.chat_history = chat_history
self.pagination = False
self.llm_chat_history = ''
self.system_prompt = ''
if llm_chat_history:
for chat_ in llm_chat_history[-10:]:
logger.info(chat_) # don't add user latest query in chat history
self.llm_chat_history += f"<{chat_['role'].lower()}>: {json.dumps(chat_['content'])}</{chat_['role'].lower()}>\n"
from app.main import app
self.neo4j_client_classic = Neo4j(db_session=app.state.neo4j_session, user_id=self.user_id,
cloud_id=self.cloud_id,
classic=True)
self.neo4j_client_vpc = Neo4j(db_session=app.state.neo4j_session, user_id=self.user_id,
cloud_id=self.cloud_id,
vpc=True)
self.classic_schema = self.neo4j_client_classic.schema
self.vpc_schema = self.neo4j_client_vpc.schema
self.migration_nodes = self.neo4j_client_classic.get_labels(migration=True)
self.vpc_nodes = self.neo4j_client_vpc.get_labels(vpc=True)
def _get_db_tool(self):
vpc_cost_optimization_tool = {
"name": "vpc_cost_optimization_tool",
"description": "This is a helper tool. It assists in retrieving service cost analysis and cost spent over the last 12 months. Use this tool whenever an overall cost analysis is involved.",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'What is the spending of my cloud account'"}
}
}
}
softlayer_infra_assessment_tool = {
"name": "softlayer_infra_assessment_tool",
"description": "This is a helper tool. It assists in retrieving high-level assessments of classic infrastructure from the database and identifying potential cost optimization opportunities. Use this tool to fetch information regarding the high-level assessments of classic infrastructures and potential cost optimization opportunities for migration to IBM Cloud VPC",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'Can you assess my current classic infrastructure and suggest how I can optimize it?'"}
}
}
}
vpc_db_tool = {
"name": "vpc_db_tool",
"description": "This is a helper tool. It assists in retrieving VPC cloud resources from the database. "
f"Use this tool to fetch information regarding resources like {self.vpc_nodes} in natural language. It only accepts queries in natural language. Don't generate Cypher",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'What are the names of all the companies' in natural language"}
}
}
}
backup_recommendation_db_tool = {
"name": "ibm_resources_that_have_no_backup_db_tool",
"description": """This tool identifies resources that have no backup.
WARNING: THIS IS A RESTRICTED TOOL - DO NOT USE FOR SPECIFIC QUERIES!
This tool ONLY accepts these exact queries:
- "Show all resources without backup"
- "Give me a complete backup gap analysis"
- "List all unprotected resources"
STRICT LIMITATIONS:
❌ DO NOT USE for:
- Queries about idle resources or resource usage
- Specific service queries (like VPC, K8s, VSI specific)
- Filtered or conditional queries
- Resource-specific backup status
- Any detailed analysis
- Idle resource related queries
- Right sizing recommendation queries
PURPOSE:
This is a time-saving utility that runs 5-6 predefined queries simultaneously to get a general overview of unprotected resources. It CANNOT process specific or filtered queries.
IMPORTANT:
→ For specific queries like "show VPCs without backup" or "K8s clusters without backup" - USE vpc_db_tool instead
→ For detailed backup analysis - USE vpc_db_tool instead
→ For service-specific queries - USE vpc_db_tool instead
→ For idle resource specific queries - USE vpc_db_tool instead
This tool will return incorrect or hallucinated results if used for anything other than general overview queries.""",
"input_schema": {
"type": "object",
"properties": {
"resource_type": {
"type": "string",
"enum": ["vsi", "vpc", "k8s", "cos", "all"],
"description": "Type of resource to check for backup protection"
}
}
}
}
classic_db_tool = {
"name": "classic_db_tool",
"description": "This is a helper tool. It assists in retrieving classic cloud resources from the database. "
f"Use this tool to fetch information regarding resources like {self.migration_nodes}",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'What are the names of all the companies'"}
}
}
}
pagination_db_tool = {
"name": "pagination_db_tool",
"description": """This is a helper tool. It helps fetch the next page of requested resources. Analyze the conversation history carefully before generating a query. Consider the following points:
1. Pagination: If this is a follow-up request, determine which page of results the user needs next.
2. Specificity: Take a close look at chat_history and create a precise query that reflects the user's current request by determining how many records have already been shown so they can be skipped. Avoid generic 'show all' queries.
3. Context: Incorporate any relevant context from previous interactions.
4. Limits: Remember that the db_tool can't fetch all records at once. Specify a reasonable LIMIT (e.g., 20 items per page)
5. Request for listing all items of a resource: Review your history; if the user is requesting all items of a resource whose count is greater than 25, then avoid creating such a query for db_tool.""",
"input_schema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "queries like 'Fetch the next 20 [resource_type] records, skipping the records that have already been shown'"}
}
}
}
logger.info("%" * 100)
logger.info(f"CLOUD TYPE--->{self.cloud_type}")
logger.info("%" * 100)
if self.cloud_type == 'ibmCloudAccounts':
logger.info("$" * 100)
logger.info(f"CLOUD TYPE--->{self.cloud_type}")
logger.info("$" * 100)
return [vpc_db_tool, pagination_db_tool, vpc_cost_optimization_tool, backup_recommendation_db_tool]
elif self.cloud_type == 'ibmSoftlayerCloudAccounts':
return [classic_db_tool, pagination_db_tool, softlayer_infra_assessment_tool]
else:
return [vpc_db_tool, classic_db_tool, pagination_db_tool, backup_recommendation_db_tool]
async def start(self, query: str):
response = self.get_response_from_llm(query)
content = await self.process_response(response)
formatted_content = self.format_response(content)
logger.info('printing formatted content')
logger.info(formatted_content)
return formatted_content
def get_response_from_llm(self, query=None):
if query:
self.base_llm.add_message(role='user',
content=f'{query} \nInternal Feedback: Take a moment to take a long breath and do not hallucinate')
try:
if self.cloud_type == 'ibmCloudAccounts':
self.system_prompt = SYSTEM.format(
cost_tool_opt="d) vpc_cost_optimization_tool: To generate cost reports and retrieve service costs,"
" and explain cost trends.",
cost_tool_instructions="10. All cost related queries, such as cost per "
"service information or cost trend related details, are handled by "
"vpc_cost_optimization_tool.",
ibm_resources_that_have_no_backup_db_tool="e) ibm_resources_that_have_no_backup_db_tool: Generates a backup recommendation report for IBM Cloud resources that are not backed up, based on the resource type specified in the request (e.g., IBM VPC, IBM VSI, IBM IKS, or IBM COS buckets). If no specific resource type is provided, the report will include all un-backed IBM resources by default.",
unbacked_resources_recommendation_db_tool_instructions="12. For backup recommendations targeting specific IBM Cloud resources such as VPC, VSI, IKS, or COS buckets, use the ibm_resources_that_have_no_backup_db_tool. When the user mentions a specific resource type (e.g., VPC, VSI, COS bucket, or IKS), limit the recommendations to that type.",
cost_tool_tag=vpc_cost_tool_tag,
vpc_schema=self.vpc_schema,
classic_schema=self.classic_schema
)
elif self.cloud_type == 'ibmSoftlayerCloudAccounts':
self.system_prompt = SYSTEM.format(
cost_tool_opt="d) softlayer_infra_assessment_tool: To generate an assessment report, retrieve high-level assessments of classic infrastructure, and identify potential cost optimization opportunities for migration to IBM Cloud VPC.",
cost_tool_instructions="10. IBM SoftLayer Cloud resource assessment data is retrieved by softlayer_infra_assessment_tool.",
cost_tool_tag=classic_infra_assessments_tool_tag,
unbacked_resources_recommendation_db_tool="",
unbacked_resources_recommendation_db_tool_instructions="If the user asks for backup recommendations related to IBM Classic infrastructure, respond with: Backup recommendations are not available for IBM Classic infrastructure at this time.",
vpc_schema=self.vpc_schema,
classic_schema=self.classic_schema
)
else:
self.system_prompt = SYSTEM.format(
cost_tool_opt="",
cost_tool_instructions="",
cost_tool_tag="",
unbacked_resources_recommendation_db_tool="",
unbacked_resources_recommendation_db_tool_instructions="",
vpc_schema=self.vpc_schema,
classic_schema=self.classic_schema
)
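# Descriptive note (assumption flagged): wrapping the system prompt in a content block with
# cache_control "ephemeral" opts it into Anthropic prompt caching, so repeated calls within the
# cache window can reuse the large schema-bearing prompt instead of re-sending it each time.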
system = [
{
"type": "text",
"text": self.system_prompt,
"cache_control": {"type": "ephemeral"}
}
]
chat_response = self.base_llm.process(tools=self._get_db_tool(), prompt_caching=True,
system=system)
# logger.info(self.system_prompt)
logger.info(chat_response)
if not chat_response:
logger.info("Received empty chat response")
return chat_response
except APIConnectionError as e:
logger.info(f"API connection error: {e}")
raise
except RateLimitError as e:
logger.info(f"Rate limit error: {e}")
except APIStatusError as e:
logger.info(f"API status error: {e}")
logger.info(f"Status code: {e.status_code}, Response: {e.response}")
async def process_response(self, response):
from app.main import app
# from app.web.common.chats_websockets_utils import check_cloud_account_status
# clean_bearer = self.bearer.replace("Bearer ", "")
# authorization = HTTPAuthorizationCredentials(scheme="Bearer", credentials=clean_bearer)
total_count = 0
# remaining_count = 0
if not response:
raise Exception("Invalid response from llm")
logger.info(response)
response_content = response['content']
if response['stop_reason'] == 'tool_use':
for single_response in response_content:
if single_response['type'] != 'tool_use':
continue
self.process_assistant_query(response_content)
function_args, function_name = single_response.get('input'), single_response['name']
logger.info("-" * 100)
logger.info(f"TOOL NAME ----->{function_name}")
logger.info(f"function ARGS: {function_args}")
# TODO: We should look into our database if data exists against our cloud account or not"
# if function_name == 'vpc_db_tool':
# api_endpoint = "/v1/ibm/clouds"
# tool = "InformationRetrievalAction"
# db_session=app.state.neo4j_session
# logger.info("Checking cloud account status for VPC...")
# cloud_account_check_response = await check_cloud_account_status(db_session, self.chat_id, api_endpoint, tool, authorization=authorization)
# if cloud_account_check_response:
# return cloud_account_check_response
# elif function_name == 'classic_db_tool':
# api_endpoint = "/v1/softlayer/accounts"
# tool = "InformationRetrievalClassic"
# db_session=app.state.neo4j_session
# logger.info("Checking cloud account status for Classic...")
# cloud_account_check_response = await check_cloud_account_status(db_session, self.chat_id, api_endpoint, tool, authorization=authorization)
# if cloud_account_check_response:
# return cloud_account_check_response
logger.info("-" * 100)
if function_name == "ibm_resources_that_have_no_backup_db_tool":
resource_type = function_args['resource_type']
result = {}
queries = get_backup_queries(self.cloud_id, resource_type)
neo4j_client = self.neo4j_client_vpc
for resource_type, query in queries.items():
results = neo4j_client.query_database(query)
result[resource_type] = results if results else "No un-backed resources found"
self.process_tool_result(
tool_id=single_response['id'],
content=json.dumps(result))
elif 'db_tool' in function_name:
question = function_args['query']
if any(keyword in question for keyword in ['MATCH', 'match', 'SELECT']):
feedback = f'Internal feedback: The function argument you generated for {function_name} was {question},\
but it appears to be in an incorrect format. To ensure proper functionality, please generate \
a natural language query for the db_tool instead of using Cypher query language. Respond with \
a valid JSON object containing a single key named "query" and its corresponding value as a \
string. The value should represent the natural language query you generated based on the \
given question.'
self.process_tool_result(tool_id=single_response['id'], content=feedback, is_error=True)
break
if '$' in question:
feedback = "The generated query contains placeholder variables ($region and $tag) which are not compatible with direct use in Neo4j queries. Neo4j doesn't automatically interpret these placeholders. Instead, please provide specific values for these parameters or use Cypher's built-in parameter syntax if parameter passing is supported in the execution context. For example, use actual values like 'us-south' for region and 'production' for tag, or use Cypher's parameter syntax like {region} and {tag} if the execution environment supports parameter passing. Avoid using $ prefixed variables in the final query sent to the Neo4j client."
self.process_tool_result(tool_id=single_response['id'], content=feedback, is_error=True)
break
regions = ibm_region_mapper.keys()
question_lower_case = question.lower()
for region in regions:
region_lower = region.lower()
pattern = r'(?<![\w-]){}(?![\w-])'.format(re.escape(region_lower))
if re.search(pattern, question_lower_case):
question = re.sub(pattern, ibm_region_mapper[region], question, flags=re.IGNORECASE)
if "classic" in function_name:
neo4j_client = self.neo4j_client_classic
elif "vpc" in function_name:
neo4j_client = self.neo4j_client_vpc
else:
neo4j_client = Neo4j(db_session=app.state.neo4j_session, user_id=self.user_id,
cloud_id=self.cloud_id)
if function_name == 'pagination_db_tool':
result, cypher = neo4j_client.run(
question=question, pagination=True, chat_history=self.llm_chat_history
)
else:
result, cypher = neo4j_client.run(
question=question
)
if cypher and cypher != 'empty':
logger.info(cypher)
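# Pagination bookkeeping (descriptive note): a COUNT variant of the generated Cypher is run
# to learn how many records match in total, and the remaining count is then estimated from
# the rows already shown for this query.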
count_query = generate_count_query(cypher)
logger.info(count_query)
total_count, _ = neo4j_client.run(cypher=count_query)
logger.info(total_count)
if total_count:
total_count = sum([count[0] for count in total_count[1:]])
resource_shown = calculate_resource_shown(cypher_query=cypher)
remaining_count = total_count - (resource_shown - 20) - (len(result) - 1)
logger.info(
"================================ Total Count in 1st Go =========================")
logger.info(total_count)
logger.info(f"remaining_count: {remaining_count}")
logger.info(
"================================ Total Count in 1st Go =========================")
if cypher == 'empty':
content = f"db_tool returned empty result. Please plan next step accordingly or try another way to query"
elif total_count and total_count <= 20:
if len(self.chat_history) <= 1:
content = f"cypher: {cypher} and result: {result}. \n If you have found all the info regarding the user's query and are ready to present it, then act as a support agent, greet the user warmly, and prepare a professional, concise and properly formatted response for the user covering all the necessary information. Don't mention that these are the first 20 resources; instead say these are the total {total_count} resources"
else:
content = f"cypher: {cypher} and result: {result}. If you have found all the info regarding the user's query and are ready to present it, then act as a support agent and prepare a professional, concise and properly formatted response for the user covering all the necessary information. Don't mention that these are the first 20 resources; instead say these are the total {total_count} resources. "
else:
logger.info(len(self.chat_history))
if len(self.chat_history) <= 1:
if not total_count:
total_count = "Couldn't find out total count. Plan out accordingly"
content = f"""cypher: {cypher}
result: {result}
total_count: {total_count}
Internal feedback: take a moment and carefully analyze the following:
1. User's most recent query
2. Entire chat history
3. Results from the db_tool
Plan your next step based on this analysis. Remember:
1. Show the exact data of the result and don't add extra records by yourself if result data is less than 20.
2. Users cannot see db_tool results unless you explicitly present them.
3. Inform users about the pagination and encourage them to provide more specific criteria for targeted searches if required
4. Maintain a professional tone and do not mention internal tools or processes.
5. Don't forget to greet the user, as this is the user's first message
Execute this analysis and planning precisely to ensure optimal user interaction and efficient data retrieval"""
else:
content = f"""cypher: {cypher}
result: {result}
total_count: {total_count}
Internal feedback: take a moment and carefully analyze the following:
1. User's most recent query
2. Entire chat history
3. Results from the db_tool
Plan your next step based on this analysis. Remember:
1. Show the exact data of the result and don't add extra records by yourself if result data is less than 20.
2. Users cannot see db_tool results unless you explicitly present them.
3. Inform users about the pagination and encourage them to provide more specific criteria for targeted searches if required
4. Maintain a professional tone and do not mention internal tools or processes.
Execute this analysis and planning precisely to ensure optimal user interaction and efficient data retrieval"""
self.process_tool_result(
tool_id=single_response['id'],
content=content)
elif "vpc_cost_optimization_tool" in function_name:
from app.models.cost_report import CloudCostReport
import traceback
execute_cost_apis = False
question = function_args['query']
async with db_deps.get_db_session_async_context() as db_client:
try:
cloud_report = await db_client.scalar(
select(CloudCostReport).filter_by(cloud_id=self.cloud_id, user_id=self.user_id))
one_hour_ago = datetime.now(timezone.utc) - timedelta(hours=1)
if cloud_report:
if 'error' in str(cloud_report.resource_json['cost_payload']):
execute_cost_apis = True
if not cloud_report or cloud_report.created_at < one_hour_ago or execute_cost_apis:
service_cost_endpoint = "v1/ibm/clouds/costs"
cost_trend_endpoint = f"v1/ibm/clouds/{self.cloud_id}/cost-tracking"
month = self.get_month()
params = {
"month": month,
"cloud_id": self.cloud_id
}
logger.debug(settings.web.AUTH_LINK + f"/{service_cost_endpoint}")
cost_payload = execute_paginated_api(
url=settings.web.AUTH_LINK + f"/{service_cost_endpoint}",
headers={"Authorization": self.bearer},
params=params)
logger.debug(cost_payload)
params['granularity'] = 'monthly'
cost_trend_payload = execute_paginated_api(
url=settings.web.AUTH_LINK + f"/{cost_trend_endpoint}",
headers={"Authorization": self.bearer},
params=params)
cost_resource_json = {
"cost_trend_payload": cost_trend_payload,
"cost_payload": cost_payload
}
if cloud_report:
# Update the existing report
cloud_report.id = cloud_report.id
cloud_report.user_id = cloud_report.user_id
cloud_report.cloud_id = cloud_report.cloud_id
cloud_report.resource_json = cost_resource_json
cloud_report.created_at = func.now() # Update timestamp to now
else:
# Create a new report if it doesn't exist
cloud_report = CloudCostReport(cloud_id=self.cloud_id, resource_json=cost_resource_json,
user_id=self.user_id)
db_client.add(cloud_report)
await db_client.commit()
cost_resource_json = cloud_report.resource_json
logger.info(f"Cost json ---- >\n{cost_resource_json}")
logger.info("EXECUTING APIs")
cost_payload = cost_resource_json['cost_payload']
cost_trend_payload = cost_resource_json["cost_trend_payload"]
if 'error' in str(cost_payload):
content = f"Cost optimization is not enabled on the selected cloud account. Please visit {settings.web.AUTH_LINK}/cloud-accounts/ibm-cloud and enable cost optimization on your cloud. "
else:
content = VPC_COST_OPTIMIZATION_TEMPLATE.format(
query=question,
service_cost=cost_payload if cost_payload != 204 else None,
cost_trend_payload=cost_trend_payload if cost_trend_payload != 204 else None)
logger.debug(f"COST TEMPLATE:\n "
f"{content}")
except ValueError as e:
logger.error(f"An error occurred in get_vpc_cost_optimization_report: {str(e)}")
content = f"An error occurred in get_vpc_cost_optimization_report: {str(e)}"
except Exception as e:
content = f"An error occurred while retrieving information: {traceback.format_exc()} ->{str(e)}"
logger.error(
f"An error occurred while retrieving information: {traceback.format_exc()} ->{str(e)}")
self.process_tool_result(
tool_id=single_response['id'],
content=content)
elif "softlayer_infra_assessment_tool" in function_name:
from app.models.cost_report import CloudCostReport
import traceback
question = function_args['query']
async with db_deps.get_db_session_async_context() as db_client:
try:
# Step 1: Check if the cost report is older than 1 hour
cloud_report = await db_client.scalar(
select(CloudCostReport).filter_by(cloud_id=self.cloud_id, user_id=self.user_id))
one_hour_ago = datetime.now(timezone.utc) - timedelta(hours=1)
if not cloud_report or cloud_report.created_at < one_hour_ago:
headers = {"Authorization": self.bearer}
cost_resource_json = await get_softlayer_cloud_cost_response(headers, self.cloud_id)
if cloud_report:
# Update the existing report
cloud_report.id = cloud_report.id
cloud_report.cloud_id = cloud_report.cloud_id
cloud_report.resource_json = cost_resource_json
cloud_report.created_at = func.now() # Update timestamp to now
else:
# Create a new report if it doesn't exist
cloud_report = CloudCostReport(cloud_id=self.cloud_id, user_id=self.user_id,
resource_json=cost_resource_json)
db_client.add(cloud_report)
await db_client.commit()
cost_resource_json = cloud_report.resource_json
# Step 3: Use the (possibly updated) cloud report data
content = SOFTLAYER_INFRA_ASSESSMENT_PROMPT.format(
query=question,
ibm_softlayer_cloud_payload=cost_resource_json
)
except ValueError as e:
logger.error(f"An error occurred while generating the SoftLayer infrastructure assessment report: {str(e)}")
content = f"An error occurred while generating the SoftLayer infrastructure assessment report: {str(e)}"
except Exception as e:
content = f"An error occurred while retrieving information: {traceback.format_exc()} ->{str(e)}"
logger.error(
f"An error occurred while retrieving information: {traceback.format_exc()} ->{str(e)}")
self.process_tool_result(
tool_id=single_response['id'],
content=content)
# Fetching new response after handling tool call
new_response = self.get_response_from_llm()
return await self.process_response(new_response)
elif response_content and response_content[0].get('text'):
logger.info("Processing user control response")
self.process_assistant_query(response_content)
user_message = response_content[0]['text']
if ('Thought:' in user_message or 'Hallucination:' in user_message) and 'Response:' not in user_message:
logger.info("******** Response key is missing in response, Retrying to get response from LLM *********")
self.base_llm.add_message(role='user',
content=f'Internal Feedback: You did not respond correctly and missed'
f' generating the response under the Response: key. Do not generate it as a tag'
f' like <Response>; instead generate the response with the key Response:')
retry_response = self.get_response_from_llm()
logger.info(retry_response)
return await self.process_response(retry_response)
elif "Response:" in user_message:
user_message = user_message.rsplit('Response:')[1]
logger.info(user_message)
return user_message
else:
return user_message
else:
raise Exception(f"Unexpected response format {response}")
def process_user_query(self, query):
self.base_llm.add_message(role=WHISPER_USER_ROLE, content=query)
def process_assistant_query(self, query):
self.base_llm.add_message(role=WHISPER_ASSISTANT_ROLE, content=query)
def process_tool_result(self, tool_id, content, is_error=False):
content = [{
"type": "tool_result",
"tool_use_id": tool_id,
"content": content,
"is_error": is_error
}]
self.base_llm.add_message(role=WHISPER_USER_ROLE, content=content)
def format_response(self, content):
try:
from app.whisper.llms.groq import GroqLLM
logger.info('**************')
logger.info(content)
logger.info('**************')
formatting_bot = GroqLLM()
formatting_bot.add_message(role="system",
content=DB_RESULT_FORMAT_PROMPT.format(content=content))
streaming_obj = formatting_bot.process()
return streaming_obj
except Exception as e:
logger.error(f"Error during formatting response: {e}")
return content
def get_month(self):
import datetime
# Get the current date
current_date = datetime.datetime.now()
# Get the current month name
current_month = current_date.strftime("%B")
return current_month
|
CloudWhisperCustomBot | app/whisper/old_implementation/phases/__init__.py | from app.whisper.old_implementation.phases.action.action_phase import ActionPhase
__all__ = ["ActionPhase"]
|
CloudWhisperCustomBot | app/whisper/old_implementation/phases/action/action_phase.py | import json
from urllib.parse import urlparse
from loguru import logger
from requests import Response
from app.whisper.llms.openai import OpenAILLM
from app.whisper.old_implementation.validators import JSONValidator
from app.whisper.utils.openapi.conversion_utils import openapi_spec_to_openai_fn
from app.whisper.utils.openapi.spec import OpenAPISpec
def extract_path_from_url(url):
parsed_url = urlparse(url)
return parsed_url.path.lstrip("/")
with open('app/api/v1/endpoints/api_path_to_fields.json', 'r') as file:
api_path_to_fields_dict = json.load(file)
def parse_response(response_data, api_path, method, intent):
logger.info(f"api path is {api_path}")
logger.info(f"api path dic {api_path_to_fields_dict}")
if api_path not in api_path_to_fields_dict[intent] or method not in api_path_to_fields_dict[intent][api_path]:
return []
method_config = api_path_to_fields_dict[intent][api_path][method]
fields_to_extract = method_config.get("fields", [])
nested_fields = method_config.get("nested_fields", {})
if not fields_to_extract:
return []
if 'items' in response_data:
response_data = response_data["items"]
elif isinstance(response_data, dict):
response_data = [response_data]
parsed_data_list = []
for item in response_data:
parsed_item = {}
for field in fields_to_extract:
if isinstance(item, dict):
parsed_item[field] = item.get(field)
else:
parsed_item[field] = item
for nested_field, nested_keys in nested_fields.items():
nested_data = item.get(nested_field)
if nested_data:
if isinstance(nested_data, list): # Check if nested data is a list
parsed_nested_list = []
for nested_item in nested_data:
parsed_nested = {key: nested_item.get(key) for key in nested_keys}
parsed_nested_list.append(parsed_nested)
parsed_item[nested_field] = parsed_nested_list
elif isinstance(nested_data, dict): # If it's a dictionary
parsed_nested = {key: nested_data.get(key) for key in nested_keys}
parsed_item[nested_field] = parsed_nested
parsed_data_list.append(parsed_item)
return parsed_data_list
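# Illustrative example (hedged; the payload below is hypothetical): with the "List IBM Clouds"
# intent, api_path_to_fields.json configures "v1/ibm/clouds" GET to keep only "id" and "name", so
# parse_response({"items": [{"id": "c-1", "name": "dev-cloud", "region": "us-south"}]},
#                "v1/ibm/clouds", "GET", "List IBM Clouds")
# returns [{"id": "c-1", "name": "dev-cloud"}].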
def parser(response: Response, intent: str) -> list | str:
logger.info(f"URL=>{response.request.url}")
method = response.request.method.upper()
path = extract_path_from_url(response.request.url)
if len(path.split('/')[-1]) == 32:
path = path[:-32] + 'temp'
logger.info(f"Path => {path}")
logger.info(f"Method => {method}")
logger.info("<<<<<<<<<<<<<<<<RESPONSE>>>>>>>>>>>>>>>>>")
logger.info(response.content)
logger.info("<<<<<<<<<<<<<<<<RESPONSE>>>>>>>>>>>>>>>>>")
if response.status_code != 200:
logger.info("*" * 100)
logger.info(f"Response({response.status_code}) =>{response.content}")
logger.info("*" * 100)
logger.info("\n\n")
return str(response.content)
data = response.json()
return parse_response(data, path, method, intent)
class ActionPhase:
def __init__(self, intent: list, messages: list, bearer: str):
self.openai_tools = []
self.base_llm = OpenAILLM()
self.extractor_llm = OpenAILLM()
self.ibm_vpcs_openai_functions, self.ibm_vpcs_callables = self._load_openapi_tools()
self.messages = messages
self.intent = intent
self.task_finished = False
self.bearer = bearer
def extract_json_from_messages(self, relevant_messages):
intent_based_schema = self.intent[-1]['method']['schema']
d_system_prompt = """You are an expert in analyzing chat history between a user and an AI and producing JSON.
Given the chat history and JSON Schema below, generate a JSON response that populates the JSON schema fields'
values. Extract ONLY the information presented in the initial request. Only update field values that are provided by the user.
Don't add anything else. Avoid hallucinating fields and values.
Return partial information if something is missing. For missing values, use <missing> as the placeholder.
Always follow the schema below. Even if the question is a greeting, you should still follow the schema and add <missing>
as the placeholder for the field values.
JSON Schema
####
{to_be_extracted}
####
""".format(to_be_extracted=intent_based_schema)
messages = [{"role": "system",
"content": d_system_prompt}]
messages.extend(relevant_messages)
def schema_construct(messages):
chat_re = self.extractor_llm.process(
messages, response_format_type="json"
)
if chat_re.status_code != 200:
logger.info(f'There seems to be a problem -> {chat_re.content}')
return schema_construct(messages)
return chat_re.json()['choices'][0]['message']
a_message = schema_construct(messages)
if a_message['content']:
extracted_json = json.loads(a_message['content'])
return extracted_json
def extract_and_validate(self):
# relevant_messages = self.get_relevant_messages()
# extracted_json = self.extract_json_from_messages(relevant_messages)
# logger.info("Extracted JSON: \n{}".format(extracted_json))
# TODO need to work on validations
# errors = self.validate_extracted_json(extracted_json)
errors = []
logger.info("------------------------- errors ---------------------------")
logger.info(errors)
logger.info("------------------------- errors ---------------------------")
return errors
def _load_openapi_tools(self):
spec = OpenAPISpec.from_file("app/whisper/flow/phases/action/specs/ibm_draas_backup_openapi_spec.json")
ibm_vpcs_openai_functions, ibm_vpcs_callables = openapi_spec_to_openai_fn(spec)
return ibm_vpcs_openai_functions, ibm_vpcs_callables
def load_intent_based_openapi_tools(self):
if not self.openai_tools:
searched_intent = self.intent
metadata = searched_intent[-1]
data = metadata['method']['tools']
tools = []
for api_func in self.ibm_vpcs_openai_functions:
if api_func['name'] not in data:
continue
func_body = {
"type": "function",
"function": api_func
}
tools.append(func_body)
self.openai_tools = tools
return self.openai_tools
def assistant_llm(self, query, tools, tool_choice):
self.process_user_query(query)
chat_response = self.base_llm.process(self.messages, tools, tool_choice)
if chat_response.status_code != 200:
logger.info("+" * 100)
logger.info(self.messages)
logger.info("+" * 100)
logger.info("\n\n")
logger.info("X" * 100)
logger.info(f"chat_response=>{str(chat_response.content)}")
logger.info("X" * 100)
logger.info("\n\n")
raise ConnectionAbortedError("Something went wrong with your internet connection.")
assistant_message = chat_response.json()['choices'][0]['message']
return assistant_message
def get_relevant_messages(self):
relevant_messages = []
for message in self.messages:
if message['role'] == 'user' or \
(message['role'] == 'assistant' and message['content'] and not message.get('tool_calls')):
relevant_messages.append(message)
return relevant_messages
def validate_extracted_json(self, extracted_json):
headers = {
"Authorization": self.bearer,
"accept": "application/json",
"Content-Type": "application/json"
}
errors = JSONValidator(intent=self.intent, headers=headers).validate_json(extracted_json)
return errors
def start(self, query: str):
response = self.assistant_llm(query, [self.load_extract_and_validate_tool()],
tool_choice="extract_and_validate")
content = self.handle_assistant_response(response)
return f"{content}\ntask finished:{self.task_finished}"
def load_extract_and_validate_tool(self):
with open('app/whisper/flow/phases/action/config/extract_and_validate_tool.json',
'r') as extract_and_validate_tool_file:
extract_and_validate_tool = json.load(extract_and_validate_tool_file)
return extract_and_validate_tool
def handle_assistant_response(self, response):
if response['content']:
self.messages.append(response)
return response['content']
if response['tool_calls'] and len(response['tool_calls']) == 1:
logger.info(f"Tool called {response['tool_calls'][0]['id']}")
self.messages.append(response)
function = response['tool_calls'][0]['function']
if function['name'] == "extract_and_validate":
logger.info("TOOL NAME - > extract_and_validate")
self.handle_extract_and_validate(response)
return self.handle_extract_and_validate_response()
else:
self.handle_tool_call(response)
return self.handle_tool_call_response(self.load_intent_based_openapi_tools())
def handle_extract_and_validate(self, assistant_message):
errors = self.extract_and_validate()
function = assistant_message["tool_calls"][0]["function"]
self.messages.append({
"role": "tool",
"tool_call_id": assistant_message["tool_calls"][0]['id'],
"name": function["name"],
"content": json.dumps(errors)
})
def handle_extract_and_validate_response(self):
chat_response = self.base_llm.process(self.messages, tools=self.load_intent_based_openapi_tools())
if chat_response.status_code != 200:
logger.info(chat_response.content)
exit(1)
assistant_message = chat_response.json()['choices'][0]['message']
self.messages.append(assistant_message)
if assistant_message['content'] and not assistant_message.get("tool_calls"):
content = assistant_message['content']
formatted_content = content.replace("\n", "<br>")
return formatted_content
self.handle_tool_call(assistant_message)
return self.handle_tool_call_response(self.load_intent_based_openapi_tools())
def handle_tool_call(self, assistant_message):
function = assistant_message["tool_calls"][0]["function"]
headers = {'Authorization': self.bearer}
logger.info("-----ARGUMENTS------")
logger.info(json.loads(function['arguments']))
response: Response = self.ibm_vpcs_callables(
name=function['name'], fn_args=json.loads(function['arguments']), headers=headers
)
searched_intent = self.intent
metadata = searched_intent[-1]
logger.info(
f'function name: {function["name"]}\n status: {response.status_code} \n last api function: {metadata["method"]["last_api_function"]} \n function arguments: {json.loads(function["arguments"])}')
if function['name'] == metadata['method']['last_api_function'] and \
(int(response.status_code) == 200 or int(response.status_code) == 202):
self.task_finished = True
logger.info('#' * 20)
logger.info(f'status changed to {self.task_finished}')
logger.info('#' * 20)
if response.status_code != 200:
result = response.status_code, response.content
logger.info("******************************** Failed => Called With following **************************")
logger.info(f"func_name={function['name']} -- args={function['arguments']}")
logger.error(f"Failed Response => {result}")
logger.info("******************************** Failed => Called With following **************************")
logger.info("\n\n")
parsed_data = parser(response, self.intent[0])
logger.info("==================== Parsed Data ============================")
logger.info(parsed_data)
logger.info("==================== Parsed Data ============================")
tool_output = json.dumps(parsed_data)
SYSTEM_NOTE = """The following is the result returned after I have called tool name:'{tool_name}':\nresult: {tool_output}
Display the complete result to the user in a clear and understandable format. At the end, display a summary of the previously selected fields, i.e. the names and IDs chosen by the user, under the heading 'Summary'.
"""
self.messages.append({
"role": "tool",
"tool_call_id": assistant_message["tool_calls"][0]['id'],
"name": function["name"],
"content": SYSTEM_NOTE.format(tool_output=tool_output, tool_name=function['name'])
})
for m in self.messages:
logger.info("-" * 40)
logger.info(m)
logger.info("-" * 40)
def handle_tool_call_response(self, tools):
chat_response = self.base_llm.process(self.messages, tools=tools)
if chat_response.status_code not in (200, 202):
logger.info("#" * 100)
logger.info(str(chat_response.status_code) + '\n' + str(chat_response.content))
logger.info("#" * 100)
try:
assistant_message = chat_response.json()['choices'][0]['message']
except Exception as ex:
logger.info("%%%%%%%%%%%%%%%%%%%%%%%% handle_tool_call_response %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
logger.info(chat_response.content)
logger.info("%%%%%%%%%%%%%%%%%%%%%%%% handle_tool_call_response %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
raise ex
self.messages.append(assistant_message)
if assistant_message['content'] and not assistant_message.get("tool_calls"):
content = assistant_message['content']
formatted_content = content.replace("\n", "<br>")
return formatted_content
elif assistant_message['content'] is None and assistant_message['tool_calls'] and len(assistant_message[
'tool_calls']) == 1:
function = assistant_message["tool_calls"][0]["function"]
logger.info(f"Tool called {function['name']}")
self.handle_tool_call(assistant_message)
return self.handle_tool_call_response(self.load_intent_based_openapi_tools())
else:
logger.info("----------------------- Tool Content and Tool --------------------")
logger.info(f"Function : {assistant_message['tool_calls'][0]['function']}")
logger.info("----------------------- Tool Content and Tool --------------------")
self.handle_tool_call(assistant_message)
return self.handle_tool_call_response(self.load_intent_based_openapi_tools())
def process_user_query(self, query):
self.messages.append({"role": "user", "content": query})
|
CloudWhisperCustomBot | app/whisper/old_implementation/phases/action/specs/api_path_to_fields.json | {
"Create IBM VPC backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/vpcs": {
"GET": {
"fields": [
"id",
"name"
],
"nested_fields": {
"associated_resources": [
"instances"
]
}
}
}
},
"List IBM Clouds": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM VPC Networks": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/vpcs": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM Regions": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
}
},
"List IBM Draas backups": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/draas_blueprints": {
"GET": {
"fields": [
"id",
"name",
"backups"
],
"nested_fields": {
"backups": [
"id",
"name"
]
}
}
}
},
"Create IBM COS bucket backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"GET": {
"fields": [
"id",
"name",
"cloud_object_storage",
"cos_bucket_versioning",
"regions"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM COS buckets": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"list IBM COS bucket instances": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List IBM Kubernetes Clusters": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Create IBM IKS Cluster backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name",
"master_kube_version"
]
}
},
"v1/ibm/kubernetes_clusters/temp": {
"GET": {
"fields": [
"id",
"workloads"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"GET": {
"fields": [
"id",
"name",
"cloud_object_storage",
"cos_bucket_versioning",
"regions"
]
}
},
"v1/ibm/cloud_object_storages/keys": {
"GET": {
"fields": [
"id",
"name",
"is_hmac"
]
}
}
},
"List IBM COS bucket credential keys": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/cloud_object_storages/keys": {
"GET": {
"fields": [
"id",
"name",
"is_hmac"
]
}
},
"v1/ibm/cloud_object_storages": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List a single IBM Kubernetes Cluster": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name",
"master_kube_version"
]
}
},
"v1/ibm/kubernetes_clusters/temp": {
"GET": {
"fields": [
"id",
"workloads"
]
}
}
},
"List all IBM VSI Instances": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Create IBM VSI backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List all IBM Backup Policies": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"List a single IBM VSI": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name",
"zones"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/instances/temp": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Create scheduled IBM VSI backup": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/instances": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"GET": {
"fields": [
"id",
"name"
]
}
}
},
"Restore IBM IKS Cluster backup in existing IKS CLuster": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/draas_blueprints": {
"GET": {
"fields": [
"id",
"name",
"backups",
"resource_metadata"
],
"nested_fields": {
"backups": [
"id",
"name"
]
}
}
},
"v1/ibm/kubernetes_clusters": {
"GET": {
"fields": [
"id",
"name",
"master_kube_version"
]
}
}
},
"Restore IBM IKS Cluster backup in existing IBM VPC": {
"v1/ibm/clouds": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/geography/regions": {
"GET": {
"fields": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/draas_blueprints": {
"GET": {
"fields": [
"id",
"name",
"backups"
],
"nested_fields": {
"backups": [
"id",
"name"
],
"resource_metadata": ["cluster_id", "blueprint_name"]
}
}
},
"v1/ibm/vpcs": {
"GET": {
"fields": [
"id",
"name"
],
"nested_fields": {
"associated_resources": [
"subnets"
]
}
}
},
"v1/ibm/resource_groups": {
"GET": {
"fields": [
"id",
"name"
]
}
},
"v1/ibm/subnets": {
"GET": {
"fields": [
"id",
"name",
"zone"
]
}
}
}
} |
CloudWhisperCustomBot | app/whisper/old_implementation/phases/action/config/extract_and_validate_tool.json | {
"type": "function",
"function": {
"name": "extract_and_validate",
"description": "Extract schema from the given chat history and validate it against validation criteria",
"parameters": {
"type": "object",
"properties": {}
}
}
}
|
CloudWhisperCustomBot | app/whisper/old_implementation/phases/action/config/validation_criteria.json | {
"id": {
"validation_fn": "is_valid_uuid"
},
"name": {
"validation_fn": "is_alphanumeric"
},
"ibm_cloud": {
"available_resources": "v1/ibm/clouds"
},
"region": {
"available_resources": "v1/ibm/geography/regions"
},
"ibm_vpc": {
"available_resources": "v1/ibm/vpcs"
},
"ibm_cos_bucket": {
"available_resources": "v1/ibm/cloud_object_storages/buckets"
},
"ibm_cos_bucket_instance": {
"available_resources": "v1/ibm/cloud_object_storages"
}
} |
CloudWhisperCustomBot | app/whisper/old_implementation/phases/action/config/dependencies.json | {
"Create IBM VPC backup": {
"region": ["ibm_cloud"],
"ibm_vpc": ["ibm_cloud"]
},
"Create IBM COS bucket backup": {
"region": ["ibm_cloud"],
"ibm_cos_bucket_instance": ["ibm_cloud"],
"bucket_id": ["ibm_cloud", "cloud_object_storage_id"]
},
"List IBM COS buckets": {
"region_id": ["cloud_id"],
"cloud_object_storage_id": ["cloud_id"]
}
}
|
CloudWhisperCustomBot | app/whisper/old_implementation/validators/__init__.py | from .request_json_validator import JSONValidator
__all__ = ['JSONValidator']
|
CloudWhisperCustomBot | app/whisper/old_implementation/validators/request_json_validator.py | import re
import uuid
import requests
from app.whisper.utils.config_reader import ConfigLoader
from loguru import logger
alphanumeric_error = "'{value}' for the '{key}' attribute must be alphanumeric."
uuid_error = "'{value}' for the '{key}' attribute must be a valid UUID."
dependency_error_msg = "{dependencies} must be provided before '{key}'."
BASE_URL = "https://vpc-stage.wanclouds.net/{endpoint}"
class JSONValidator:
def __init__(self, intent: str, headers: dict):
self.intent = intent
self.headers = headers
self.validation_config, self.dependencies = ConfigLoader(self.intent).load_configs()
if not (self.validation_config and self.dependencies):
raise RuntimeError(f"Please add the '{self.intent}' in the validation_config.json and dependencies.json "
f"files.")
def is_alphanumeric(self, value):
return bool(re.match(r"^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$", value))
def is_valid_uuid(self, value):
try:
uuid_obj = uuid.UUID(value)
return str(uuid_obj) == value
except ValueError:
return False
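# Illustrative checks (hedged): the name validator accepts lowercase, hyphenated, alphanumeric
# names and the UUID validator accepts canonical UUID strings, e.g.
# is_alphanumeric("my-vpc-01") -> True, is_alphanumeric("My_VPC") -> False,
# is_valid_uuid("123e4567-e89b-12d3-a456-426614174000") -> True.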
def _fetch_resources(self, field, data):
endpoint = self.validation_config[field]["available_resources"]
if endpoint:
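# Configured dependencies for this field (see dependencies.json) become query parameters,
# e.g. a region lookup is filtered by the already-selected ibm_cloud.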
dependencies = self.dependencies.get(field)
if dependencies and isinstance(dependencies, list):
query_params = {dep: data.get(dep) for dep in dependencies if data.get(dep)}
query_string = '&'.join([f"{key}={value}" for key, value in query_params.items()])
endpoint += f"?{query_string}"
# Make the API call using the constructed endpoint
url = BASE_URL.format(endpoint=endpoint)
logger.info(f"URL -> {url}")
response = requests.get(url, headers=self.headers)
if response.status_code == 200:
return response.json()
return []
def validate_json(self, data):
errors = []
validation_functions = {
"is_alphanumeric": self.is_alphanumeric,
"is_valid_uuid": self.is_valid_uuid,
}
validation_functions_to_errors = {
"is_alphanumeric": alphanumeric_error,
"is_valid_uuid": uuid_error,
}
if "json" in data:
data = data["json"]
for key, value in data.items():
if value == "<missing>":
continue
logger.info('$' * 30)
logger.info(f"Processing key: {key}, value: {value}")
logger.info('$' * 30)
if isinstance(value, dict) and len(value.keys()) > 2:
return self.validate_json(value)
logger.info('#'*30)
field_config = self.validation_config.get(key)
logger.debug(f"Field config for {key}: {field_config}")
dependencies = self.dependencies.get(key, [])
logger.info(f"Dependencies for {key}: {dependencies}")
logger.info('#'*30)
logger.info("--------------------- validate_json --------------------")
logger.info(f"{key} - {field_config}")
logger.info("--------------------- validate_json --------------------")
if field_config:
# TODO: change this
dependencies = self.dependencies.get(key, [])
if dependencies:
missing_dependencies = [dep for dep in dependencies if data.get(dep) == "<missing>"]
if missing_dependencies:
errors.append(
dependency_error_msg.format(key=key, dependencies={', '.join(missing_dependencies)})
)
continue
validation_fn_name = field_config.get("validation_fn")
if validation_fn_name and validation_fn_name in validation_functions:
validation_fn = validation_functions[validation_fn_name]
validation_error = validation_functions_to_errors[validation_fn_name]
if not validation_fn(value):
errors.append(validation_error.format(key=key, value=value))
if dependencies:
for dep in dependencies:
if data.get(dep) == "<missing>":
errors.append(f"Validation failed for '{key}'. Dependency '{dep}' is missing.")
break
# if dependencies:
if "available_resources" in field_config:
fetched_resources = self._fetch_resources(key, data)
if isinstance(fetched_resources, dict) and "items" in fetched_resources:
fetched_resources = fetched_resources["items"]
if not fetched_resources:
errors.append(
f"Validation failed for '{key}' with value '{value}'. No available resource found.")
continue
fetched_resources_names = []
for resource in fetched_resources:
if "resource_json" in resource:
fetched_resources_names.append(resource['resource_json']['name'])
else:
fetched_resources_names.append(resource['name'])
if value not in fetched_resources_names:
errors.append(f"No '{key}' with value '{value}' found. Please check the available '{key}'")
# Filter out errors for fields with value '<missing>'
errors = [error for error in errors if '<missing>' not in error]
return errors
|
CloudWhisperCustomBot | app/whisper/llms/openai.py | import loguru
import requests
from dotenv import load_dotenv
from openai import OpenAI
from tenacity import retry, stop_after_attempt, wait_random_exponential
from app.whisper.llms.base_llm import BaseLLM
from app.core.config import settings
class OpenAILLM(BaseLLM):
def __init__(self, model=None):
if not model:
model = "gpt-4-1106-preview"
_ = load_dotenv()
self.openai_apikey = settings.openai.OPENAI_API_KEY
self.model = model
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
async def process_str(self, template):
client = OpenAI(api_key=self.openai_apikey)
stream = client.chat.completions.create(
model='gpt-3.5-turbo',
messages=[
{'role': 'user', 'content': template}
],
temperature=0,
stream=True # this time, we set stream=True
)
for chunk in stream:
yield chunk.choices[0].delta.content
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def process(self, messages, tools=None, tool_choice=None, response_format_type=None):
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.openai_apikey,
}
json_data = {
"model": self.model, "messages": messages, "temperature": 0, "seed": 313,
}
if response_format_type == "json":
json_data.update({"response_format": {"type": "json_object"}})
if tools is not None:
json_data.update({"tools": tools})
if tool_choice is not None:
tool_choice_data = {"type": "function", "function": {"name": tool_choice}}
json_data.update({"tool_choice": tool_choice_data})
try:
response = requests.post(
"https://api.openai.com/v1/chat/completions",
headers=headers,
json=json_data,
)
return response
except Exception as e:
loguru.logger.info("Unable to generate ChatCompletion response")
loguru.logger.info(f"Exception: {e}")
return e
def format_chat_history(self, chat_history):
pass
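# Minimal usage sketch (assumes a valid OpenAI API key in settings; the prompt and message
# list are illustrative only). process() returns a requests.Response, so the completion text
# lives under choices[0].message.content in the JSON body.
if __name__ == "__main__":
    llm = OpenAILLM()
    response = llm.process(messages=[{"role": "user", "content": "Say hello"}])
    if hasattr(response, "status_code") and response.status_code == 200:
        print(response.json()["choices"][0]["message"]["content"])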
|
CloudWhisperCustomBot | app/whisper/llms/anthropic.py | import json
from loguru import logger
from tenacity import retry, stop_after_attempt, wait_random_exponential
from anthropic import Anthropic, AnthropicBedrock, APIConnectionError, APIStatusError
from app.core.config import settings
from app.whisper.consts import WHISPER_USER_ROLE, WHISPER_ASSISTANT_ROLE
from app.whisper.llms.base_llm import BaseLLM
class AnthropicLLM(BaseLLM):
def __init__(self, model="claude-3-5-haiku-latest"):
self.model = model
self.anthropic_apikey = settings.anthropic.ANTHROPIC_API_KEY
self.messages = []
self.client = Anthropic(api_key=self.anthropic_apikey)
self.aws_bedrock_model = "anthropic.claude-3-haiku-20240307-v1:0"
self.aws_bedrock_client = AnthropicBedrock(
aws_access_key=settings.aws.AWS_ACCESS_KEY,
aws_secret_key=settings.aws.AWS_SECRET_KEY,
aws_region=settings.aws.AWS_REGION
)
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def process(self, system=None, tools=[], force_tool=False, tool_name=None, prompt_caching=False):
"""
Process the request using Anthropic API with fallback to AWS Bedrock if necessary.
"""
try:
response = self._anthropic_request(system, tools, force_tool, tool_name, prompt_caching)
logger.info(f"Received response: {response}")
if not response.content:
self.add_message(role=WHISPER_ASSISTANT_ROLE, content='null')
                retry_message = "Internal feedback: the previous response had empty content. Please generate a response; do not apologize, as this is internal feedback."
self.add_message(role=WHISPER_USER_ROLE, content=retry_message)
retry_response = self._anthropic_request(system, tools, force_tool, tool_name, prompt_caching)
logger.info(f"Received retry response from Anthropic API: {retry_response}")
if not retry_response.content:
raise ValueError("Failed to receive non-empty response after retry")
return json.loads(retry_response.json()) if tools else retry_response.content[0].text
return json.loads(response.json()) if tools else response.content[0].text
except APIConnectionError as e:
logger.error(f"Anthropic API failed: {e} Caused by: {e.__cause__}")
except APIStatusError as e:
if e.status_code in [529, 429]:
logger.error(f"Anthropic API failed: {e}. Fallback to AWS Bedrock.")
return self.process_bedrock(system, tools, force_tool, tool_name, prompt_caching)
logger.error(f"Anthropic API failed: {e}")
except Exception as e:
logger.error(f"An error occurred with Anthropic API: {e}")
def _anthropic_request(self, system, tools, force_tool, tool_name, prompt_caching):
"""
Internal helper to handle different Anthropic request cases.
"""
params = {
"model": self.model,
"max_tokens": 4000,
"messages": self.messages,
"temperature": 0.0,
"tools": tools
}
if system:
params["system"] = system
if force_tool:
params["tool_choice"] = {"type": "tool", "name": tool_name}
if prompt_caching:
return self.client.beta.prompt_caching.messages.create(**params)
return self.client.messages.create(**params)
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
    def process_bedrock(self, system=None, tools=[], force_tool=False, tool_name=None, prompt_caching=False):
"""
Fallback mechanism that sends the request to AWS Bedrock if Anthropic API fails.
"""
try:
response = self._bedrock_request(system, tools, force_tool, tool_name, prompt_caching)
logger.info(f"Received response from AWS Bedrock: {response}")
if not response.content:
logger.info("Response from AWS Bedrock is empty, retrying...")
self.add_message(role=WHISPER_ASSISTANT_ROLE, content='null')
                retry_message = "Internal feedback: the previous response had empty content. Please generate a response; do not apologize, as this is internal feedback."
self.add_message(role=WHISPER_USER_ROLE, content=retry_message)
retry_response = self._bedrock_request(system, tools, force_tool, tool_name, prompt_caching)
logger.info(f"Received retry response from AWS Bedrock: {retry_response}")
if not retry_response.content:
logger.error("Failed to receive non-empty response from AWS Bedrock after retry")
raise ValueError("Failed to receive non-empty response from AWS Bedrock after retry")
return json.loads(retry_response.json()) if tools else retry_response.content[0].text
return json.loads(response.json()) if tools else response.content[0].text
except APIConnectionError as e:
logger.error(f"AWS Bedrock API failed: {e} Caused by: {e.__cause__}")
except APIStatusError as e:
logger.error(f"AWS Bedrock API failed: {e}")
except Exception as e:
logger.error(f"An error occurred with AWS Bedrock API: {e}")
def _bedrock_request(self, system, tools, force_tool, tool_name, prompt_caching):
"""
Internal helper to handle different AWS Bedrock request cases.
"""
params = {
"model": self.aws_bedrock_model,
"max_tokens": 4000,
"messages": self.messages,
"temperature": 0.0,
"tools": tools
}
if system:
params["system"] = system
if force_tool:
params["tool_choice"] = {"type": "tool", "name": tool_name}
if prompt_caching:
return self.aws_bedrock_client.beta.prompt_caching.messages.create(**params)
return self.aws_bedrock_client.messages.create(**params)
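    # Usage sketch (illustrative only, not executed here): callers typically do
    #   llm = AnthropicLLM()
    #   llm.add_message(WHISPER_USER_ROLE, "List my VPCs")
    #   text = llm.process(system="<system prompt>")
    # If the Anthropic API returns 429/529, process() falls back to AWS Bedrock via
    # process_bedrock(), which mirrors the same request parameters.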
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
async def process_stream(self, system=None, tools=[], tool_choice=None):
try:
params = {
"model": self.model,
"max_tokens": 4000,
"messages": self.messages,
"temperature": 0.0,
"tools": tools,
"stream": True
}
if system:
params["system"] = system
stream = self.client.messages.create(**params)
for event in stream:
if event.type == "content_block_delta":
yield event.delta.text
stream.close()
except APIConnectionError as e:
logger.error(f"Anthropic API stream failed: {e} Caused by: {e.__cause__}")
except APIStatusError as e:
            if e.status_code in [529, 429]:
                logger.error(f"Anthropic API stream failed: {e}. Falling back to AWS Bedrock stream.")
                async for response in self.process_stream_bedrock(system, tools, tool_choice):
                    yield response
                return
            logger.error(f"Anthropic API stream failed: {e}")
except Exception as e:
logger.error(f"An error occurred with Anthropic API stream: {e}")
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
async def process_stream_bedrock(self, system=None, tools=[], tool_choice=None):
"""
Fallback mechanism for stream processing using AWS Bedrock.
"""
try:
params = {
"model": self.aws_bedrock_model,
"max_tokens": 4000,
"messages": self.messages,
"temperature": 0.0,
"tools": tools,
"stream": True
}
if system:
params["system"] = system
stream = self.aws_bedrock_client.messages.create(**params)
for event in stream:
if event.type == "content_block_delta":
yield event.delta.text
stream.close()
except APIConnectionError as e:
logger.error(f"AWS Bedrock stream failed: {e} Caused by: {e.__cause__}")
except APIStatusError as e:
logger.error(f"AWS Bedrock stream failed: {e}")
except Exception as e:
logger.error(f"An error occurred with AWS Bedrock stream: {e}")
def add_message(self, role, content):
if role == WHISPER_USER_ROLE:
role = 'user'
elif role == WHISPER_ASSISTANT_ROLE:
role = 'assistant'
self.messages.append({"role": role, "content": content})
def format_chat_history(self, chat_history, update_messages=False):
messages = []
for chat in chat_history:
if chat['type'].lower() in ['user', 'human']:
messages.append({'role': 'user', 'content': str(chat['text'])})
elif chat['type'].lower() in ['ai', 'assistant']:
messages.append({'role': 'assistant', 'content': str(chat['text'])})
if update_messages:
self.messages.extend(messages)
return messages |
CloudWhisperCustomBot | app/whisper/llms/mistral.py | import asyncio
import os
import aiohttp
from loguru import logger
from app.whisper.llms.base_llm import BaseLLM
class MistralLLM(BaseLLM):
MISTRAL_HOST = os.environ.get('MISTRAL_HOST', "35.232.5.43")
MISTRAL_PORT = os.environ.get('MISTRAL_PORT', "9090")
MISTRAL_PATH = f'http://{MISTRAL_HOST}:{MISTRAL_PORT}'
def __init__(self):
pass
async def process_stream(self, messages, system_prompt=None):
headers = {
"Content-Type": "application/json",
}
if system_prompt:
json_data = {"system_prompt": system_prompt, "messages": messages, "stream": True}
else:
json_data = {"messages": messages, "stream": True}
try:
async with aiohttp.ClientSession() as session:
async with session.get(f"{self.MISTRAL_PATH}/chat",
headers=headers,
json=json_data) as response:
async for chunk in response.content.iter_any():
yield chunk.decode()
await asyncio.sleep(0)
except Exception as e:
raise Exception(f"Exception: {e}")
async def process(self, messages, system_prompt=None, stopping_criteria=None):
headers = {
"Content-Type": "application/json",
}
json_data = {"system_prompt": system_prompt, "messages": messages} if system_prompt else {"messages": messages}
if stopping_criteria:
json_data["stopping_criteria"] = stopping_criteria
logger.info(f'{json_data}')
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{self.MISTRAL_PATH}/chat",
headers=headers,
json=json_data,
) as response:
return await response.text()
except Exception as e:
raise Exception(f"Exception: {e}")
def format_chat_history(self, chat_history):
messages = []
for chat in chat_history:
if chat['type'] == 'user':
messages.append({'role': 'user', 'content': str(chat['message'])})
elif chat['type'] == 'assistant':
messages.append({'role': 'assistant', 'content': str(chat['message'])})
print(messages)
return messages
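# Minimal usage sketch (assumes a Mistral inference server is reachable at
# MISTRAL_HOST:MISTRAL_PORT; the prompt text is illustrative only).
if __name__ == "__main__":
    async def _demo():
        llm = MistralLLM()
        reply = await llm.process(messages=[{"role": "user", "content": "Say hello"}])
        print(reply)
    asyncio.run(_demo())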
|
CloudWhisperCustomBot | app/whisper/llms/base_llm.py | from abc import ABC, abstractmethod
class BaseLLM(ABC):
@abstractmethod
def process(self, *args, **kwargs):
pass
@abstractmethod
def format_chat_history(self, chat_history):
pass
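# Minimal sketch (illustration only; the EchoLLM name is hypothetical): a concrete subclass
# must implement both abstract methods before it can be instantiated.
if __name__ == "__main__":
    class EchoLLM(BaseLLM):
        def process(self, *args, **kwargs):
            # Echo the first argument back instead of calling a real model.
            return args[0] if args else None
        def format_chat_history(self, chat_history):
            return [{"role": c.get("type", "user"), "content": str(c.get("text", ""))} for c in chat_history]
    print(EchoLLM().process("hello"))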
|
CloudWhisperCustomBot | app/whisper/llms/groq.py | from groq import Groq
from loguru import logger
from tenacity import retry, stop_after_attempt, wait_random_exponential
from app.core.config import settings
class GroqLLM:
def __init__(self, model="llama-3.1-8b-instant"):
self.model = model
self.groq_api_key = settings.groq.GROQ_API_KEY
self.messages = []
self.client = Groq(api_key=self.groq_api_key)
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def process(self, system=None, tools=[], force_tool=False, tool_name=None, prompt_caching=False):
try:
params = {
"model": self.model,
"max_tokens": 4000,
"messages": self.messages,
"temperature": 0.0,
"tools": tools
}
            if system:
                # Groq's chat API is OpenAI-compatible: the system prompt is passed as a leading
                # message with role "system", not as a top-level parameter.
                params["messages"] = [{"role": "system", "content": system}] + self.messages
            if force_tool:
                # OpenAI-style tool_choice schema (Groq follows the OpenAI format).
                params["tool_choice"] = {"type": "function", "function": {"name": tool_name}}
response = self.client.chat.completions.create(**params)
logger.info(f"Received response: {response}")
return response.choices[0].message.content
except Exception as e:
logger.error(f"An error occurred with Groq API: {e}")
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
async def process_stream(self, system=None, tools=[], tool_choice=None):
try:
params = {
"model": self.model,
"max_tokens": 4000,
"messages": self.messages,
"temperature": 0.0,
"tools": tools,
"stream": True
}
            if system:
                # System prompt is passed as a leading "system" message (OpenAI-compatible API).
                params["messages"] = [{"role": "system", "content": system}] + self.messages
stream = self.client.chat.completions.create(**params)
for chunk in stream:
if chunk.choices:
delta = chunk.choices[0].delta
if delta.content is not None:
yield delta.content
stream.close()
except Exception as e:
logger.error(f"An error occurred with Groq API stream: {e}")
    def add_message(self, role, content):
        # Roles are already expected in the OpenAI-compatible form ('user', 'assistant', 'system'),
        # so no mapping is needed before appending.
        self.messages.append({"role": role, "content": content})
def format_chat_history(self, chat_history, update_messages=False):
messages = []
for chat in chat_history:
if chat['type'].lower() in ['user', 'human']:
messages.append({'role': 'user', 'content': str(chat['text'])})
elif chat['type'].lower() in ['ai', 'assistant']:
messages.append({'role': 'assistant', 'content': str(chat['text'])})
if update_messages:
self.messages.extend(messages)
return messages
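# Minimal usage sketch (requires a valid GROQ_API_KEY in settings; the prompt is illustrative
# only). Errors are logged inside process(), which then returns None.
if __name__ == "__main__":
    llm = GroqLLM()
    llm.add_message("user", "In one sentence, what is a VPC?")
    print(llm.process())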
|
CloudWhisperCustomBot | app/knowledge_graph/spec_resolver.py | import json
def resolve_ref(ref, components):
if ref.startswith("#/components/schemas/"):
ref_name = ref.split("/")[-1]
# if ref_name == 'DisasterRecoveryBackupOut':
# return {}
print(components.get("schemas", {}).get(ref_name, {}))
return components.get("schemas", {}).get(ref_name, {})
return {}
def process_schema(schema, components):
if "$ref" in schema:
ref = schema["$ref"]
resolved_schema = resolve_ref(ref, components)
return process_schema(resolved_schema, components)
elif "properties" in schema:
properties = schema["properties"]
for key, value in properties.items():
properties[key] = process_schema(value, components)
elif "items" in schema:
items = schema["items"]
schema["items"] = process_schema(items, components)
elif 'allOf' in schema:
allOf = schema['allOf']
for data in allOf:
if 'description' in schema:
schema = {"items": process_schema(data, components), 'description': schema['description']}
else:
schema = {"items": process_schema(data, components)}
else:
return schema
return schema
def resolve_request_body(request_body, components):
if "content" in request_body:
content = request_body["content"]
content_type = list(content.keys())[0] if content else None
schema = content.get(content_type, {}).get("schema", {})
resolved_schema = process_schema(schema, components)
request_body["content"][content_type]["schema"] = resolved_schema
return request_body
def resolve_response_body(request_body, components):
if "content" in request_body:
content = request_body['content']
content_type = list(content.keys())[0] if content else None
schema = content.get(content_type, {}).get("schema", {})
if schema:
print(schema)
resolved_schema = process_schema(schema, components)
request_body["content"][content_type]["schema"] = resolved_schema
return request_body
def resolve_schema_references(schema, components):
if "$ref" in schema:
ref = schema["$ref"]
resolved_schema = resolve_ref(ref, components)
return resolve_schema_references(resolved_schema, components)
elif "properties" in schema:
properties = schema["properties"]
for key, value in properties.items():
properties[key] = resolve_schema_references(value, components)
elif "items" in schema:
items = schema["items"]
schema["items"] = resolve_schema_references(items, components)
elif "allOf" in schema:
all_of = schema["allOf"]
resolved_all_of = []
for item in all_of:
resolved_item = resolve_schema_references(item, components)
resolved_all_of.append(resolved_item)
schema["allOf"] = resolved_all_of
elif "anyOf" in schema:
any_of = schema["anyOf"]
resolved_any_of = []
for item in any_of:
resolved_item = resolve_schema_references(item, components)
resolved_any_of.append(resolved_item)
schema["anyOf"] = resolved_any_of
elif "oneOf" in schema:
one_of = schema["oneOf"]
resolved_one_of = []
for item in one_of:
resolved_item = resolve_schema_references(item, components)
resolved_one_of.append(resolved_item)
schema["oneOf"] = resolved_one_of
elif "not" in schema:
not_schema = schema["not"]
resolved_not = resolve_schema_references(not_schema, components)
schema["not"] = resolved_not
elif "content" in schema:
content = schema["content"]
for media_type, media_info in content.items():
schema = media_info.get("schema", {})
media_info["schema"] = resolve_schema_references(schema, components)
return schema
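# Tiny illustrative check (toy schema, not taken from the real OpenAPI spec): process_schema
# swaps a "$ref" for the referenced component definition.
if __name__ == '__main__':
    _components = {"schemas": {"Pet": {"properties": {"name": {"type": "string"}}}}}
    _resolved = process_schema({"$ref": "#/components/schemas/Pet"}, _components)
    assert _resolved == {"properties": {"name": {"type": "string"}}}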
if __name__ == '__main__':
# Load a JSON file and resolve all reference ("transformation") in the "paths" key.
with open("openapi.json") as f:
ibm_api_spec = json.load(f)
api_spec = ibm_api_spec
get_api_data = dict()
for route, methods in api_spec.get("paths", {}).items():
for method, endpoint_docs in methods.items():
if method == 'get':
request_body = endpoint_docs.get("requestBody", {}) if endpoint_docs else {}
response_body = endpoint_docs.get("responses", {}).get('202', {}) if endpoint_docs else {}
response200 = endpoint_docs.get("responses", {}).get("200", {}) if endpoint_docs else {}
if request_body:
resolved_request_body = resolve_request_body(request_body, api_spec.get("components", {}))
endpoint_docs["requestBody"] = resolved_request_body
if response_body:
resolved_response_body = resolve_response_body(response_body, api_spec.get("components", {}))
endpoint_docs["responses"]['202'] = resolved_response_body
if response200:
resolved_response_body = resolve_response_body(response200, api_spec.get("components", {}))
endpoint_docs["responses"]['200'] = resolved_response_body
# Update the endpoint docs in the original structure
get_api_data[route] = {method: endpoint_docs}
# api_spec["paths"][route] = {method: endpoint_docs}
api_spec['paths'] = get_api_data
# Write the resolved JSON to a new file
with open("resolved_api.json", "w") as output_file:
json.dump(api_spec, output_file, indent=2)
|
CloudWhisperCustomBot | app/models/cost_report.py | import uuid
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.sql import func
from app.models.base import Base
class CloudCostReport(Base):
    ID_KEY = "id"
    USER_ID_KEY = "user_id"
    CLOUD_ID_KEY = "cloud_id"
RESOURCE_JSON_KEY = "resource_json"
CREATED_AT_KEY = "created_at"
__tablename__ = 'cloud_cost_reports'
id = Column(String(32), primary_key=True)
cloud_id = Column(String(32), nullable=False)
user_id = Column(String(32), nullable=False)
resource_json = Column("resource_json", JSONB, nullable=False)
created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
def __init__(self, cloud_id, user_id, resource_json):
self.id = str(uuid.uuid4().hex)
self.cloud_id = cloud_id
self.user_id = user_id
self.resource_json = resource_json
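# Minimal construction sketch (illustrative IDs and payload; persisting the row through the
# app's async SQLAlchemy session is assumed to happen elsewhere).
if __name__ == "__main__":
    report = CloudCostReport(
        cloud_id="a" * 32,
        user_id="b" * 32,
        resource_json={"total_cost": 123.45, "currency": "USD"},
    )
    print(report.id, report.resource_json)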
|
CloudWhisperCustomBot | app/models/activity_tracking.py | import uuid
from sqlalchemy import Column
from sqlalchemy import select
from sqlalchemy.sql.sqltypes import String, Text, JSON, Boolean
from sqlalchemy.sql.schema import ForeignKey
from sqlalchemy.orm import deferred
from loguru import logger
from app import models
from app.models.base import Base
class ActivityTracking(Base):
ID_KEY = 'id'
WORKFLOW_ID_KEY = "workflow_id"
USER_ID_KEY = "user_id"
RESOURCE_NAME_KEY = 'resource_name'
RESOURCE_TYPE_KEY = 'resource_type'
FE_REQUEST_DATA_KEY = "fe_request_data"
ACTIVITY_TYPE_KEY = 'activity_type'
CREATED_AT_KEY = 'created_at'
STARTED_AT_KEY = 'started_at'
COMPLETED_AT_KEY = 'completed_at'
SUMMARY_KEY = 'summary'
EMAIL_KEY = 'email'
STATUS_KEY = 'status'
CHAT_KEY = "chat"
ACTION_KEY = "action"
STATUS_PENDING = "PENDING"
# The task has been initiated, but is not yet picked up by any worker
STATUS_INITIATED = "INITIATED"
# At least one of the tasks in this tree is running
STATUS_RUNNING = "RUNNING"
# Some roots in the tree were successful but some are still ON_HOLD
STATUS_ON_HOLD_WITH_SUCCESS = "ON_HOLD_WITH_SUCCESS"
poling_statuses_list = [STATUS_PENDING, STATUS_INITIATED, STATUS_RUNNING, STATUS_ON_HOLD_WITH_SUCCESS]
STATUS_C_SUCCESSFULLY = "COMPLETED_SUCCESSFULLY"
STATUS_C_W_FAILURE = "COMPLETED_WITH_FAILURE"
STATUS_ON_HOLD_WITH_FAILURE = "ON_HOLD_WITH_FAILURE"
failure_statues = [STATUS_C_W_FAILURE, STATUS_ON_HOLD_WITH_FAILURE]
VPC_RESOURCE_TYPE = "IBMVpcNetwork"
IKS_RESOURCE_TYPE = "IBMKubernetesCluster"
resource_types_list = [VPC_RESOURCE_TYPE, IKS_RESOURCE_TYPE]
RESTORE = "RESTORE"
BACKUP = "BACKUP"
__tablename__ = "activity_tracking"
id = Column(String(32), primary_key=True)
workflow_id = Column(String(32), nullable=False)
user_id = Column(String(32), nullable=False)
resource_name = Column(String(128), nullable=False)
resource_type = Column(String(128), nullable=False, index=True)
fe_request_data = deferred(Column(JSON))
activity_type = Column(String(64), nullable=False, index=True)
created_at = Column(String(256), nullable=False)
started_at = Column(String(256), nullable=True)
completed_at = Column(String(256), nullable=True)
summary = Column(Text, nullable=False)
email = Column(String(255), nullable=False)
status = Column(String(128), default="PENDING", nullable=False)
is_polled = Column(Boolean, nullable=True, default=False)
chat_id = Column(String(32), nullable=False)
action_id = Column(String(32), nullable=False)
profile_id = Column(String(32), ForeignKey('profiles.id', ondelete='CASCADE'), nullable=False)
def __init__(self, workflow_id, user_id, resource_name, resource_type, activity_type, created_at, summary, email,
status, chat_id, action_id, started_at=None, completed_at=None, fe_request_data=None):
self.id = str(uuid.uuid4().hex)
self.workflow_id = workflow_id
self.user_id = user_id
self.resource_name = resource_name
self.resource_type = resource_type
self.activity_type = activity_type
self.created_at = created_at
self.started_at = started_at
self.completed_at = completed_at
self.summary = summary
self.email = email
self.status = status
self.chat_id = chat_id
self.action_id = action_id
self.fe_request_data = fe_request_data
def to_reference_json(self):
return {
self.ID_KEY: self.id,
            self.WORKFLOW_ID_KEY: self.workflow_id,
            self.USER_ID_KEY: self.user_id,
            self.RESOURCE_NAME_KEY: self.resource_name,
            self.RESOURCE_TYPE_KEY: self.resource_type,
            self.ACTIVITY_TYPE_KEY: self.activity_type
}
async def to_event_json(self, db_session):
action = (
await db_session.scalars(select(models.Action).filter(models.Action.id == self.action_id))).one_or_none()
if action:
return {
"name": action.name,
"workflow_id": self.workflow_id
}
async def to_json(self, db_session):
# Load the 'fe_request_data' if it hasn't been loaded yet
if not self.fe_request_data:
self.fe_request_data = await db_session.scalar(
                select(ActivityTracking.fe_request_data)
                .where(ActivityTracking.id == self.id)
)
chat = (await db_session.scalars(select(models.Chat).filter(models.Chat.id == self.chat_id))).one_or_none()
action = (
await db_session.scalars(select(models.Action).filter(models.Action.id == self.action_id))).one_or_none()
return {
self.ID_KEY: self.id,
self.WORKFLOW_ID_KEY: self.workflow_id,
self.USER_ID_KEY: self.user_id,
self.RESOURCE_NAME_KEY: self.resource_name,
self.RESOURCE_TYPE_KEY: self.resource_type,
self.FE_REQUEST_DATA_KEY: self.fe_request_data or {}, # Handle case when it's None or empty
self.ACTIVITY_TYPE_KEY: self.activity_type,
self.CREATED_AT_KEY: self.created_at,
self.STARTED_AT_KEY: self.started_at if self.started_at else None,
self.COMPLETED_AT_KEY: self.completed_at if self.completed_at else None,
self.SUMMARY_KEY: self.summary,
self.EMAIL_KEY: self.email,
self.STATUS_KEY: self.status,
self.CHAT_KEY: chat.to_reference_json() if chat else None,
self.ACTION_KEY: action.to_reference_json() if action else None
}
|
CloudWhisperCustomBot | app/models/__init__.py | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .chat import Chat, Message, Action
from .profile import Profile
from .activity_tracking import ActivityTracking
from .chat import Chat, Message, Action
from .cost_report import CloudCostReport
from .profile import Profile
from .activity_tracking import ActivityTracking
__all__ = ['Action', 'ActivityTracking', 'Profile', 'Chat', 'CloudCostReport', 'Message']
|
CloudWhisperCustomBot | app/models/chat.py | import uuid
from sqlalchemy import Column, Enum
from sqlalchemy.orm import relationship
from sqlalchemy.sql.schema import ForeignKey
from sqlalchemy.sql.sqltypes import Boolean, String, Text, TIMESTAMP
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.sql.expression import text
from app.models.base import Base
from app.web.common.consts import CREATED_AT_FORMAT_WITH_MILLI_SECONDS
class Chat(Base):
ID_KEY = "id"
TITLE_KEY = "title"
CHAT_TYPE_KEY = "type"
IS_VISIBLE_KEY = "is_visible"
CREATED_AT_KEY = "created_at"
TYPE_QNA = "QnA"
TYPE_ACTION = "Action"
CHAT_TYPE_LIST = [TYPE_QNA, TYPE_ACTION]
MESSAGES_KEY = "messages"
PROFILE_KEY = "user"
BOT_KEY = "bot"
__tablename__ = "chats"
id = Column(String(32), primary_key=True)
title = Column(String(200), nullable=False)
chat_type = Column(Enum(*CHAT_TYPE_LIST, name="chat_type_enum", create_type=False), default=TYPE_QNA)
is_visible = Column(Boolean, nullable=False, default=True)
created_at = Column(TIMESTAMP(timezone=False), default=text('now()'), nullable=False)
confirmation_stage = Column(Boolean, nullable=False, default=False)
json_metadata = Column("json_metadata", JSONB, nullable=True)
profile_id = Column(String(32), ForeignKey('profiles.id'), nullable=True)
messages = relationship('Message', passive_deletes=True, back_populates='chat', cascade="all, delete-orphan",
order_by="Message.sent_at", lazy="selectin")
def __init__(self, title, chat_type=TYPE_QNA, is_visible=True):
self.id = str(uuid.uuid4().hex)
self.title = title
self.chat_type = chat_type
self.is_visible = is_visible
def to_reference_json(self):
return {
self.ID_KEY: self.id,
self.TITLE_KEY: self.title,
self.CHAT_TYPE_KEY: self.chat_type
}
def to_json(self):
return {
self.ID_KEY: self.id,
self.TITLE_KEY: self.title,
self.CHAT_TYPE_KEY: self.chat_type,
self.CREATED_AT_KEY: self.created_at.strftime(CREATED_AT_FORMAT_WITH_MILLI_SECONDS),
self.MESSAGES_KEY: [message.to_reference_json() for message in self.messages],
self.PROFILE_KEY: self.profile.to_reference_json() if self.profile else {},
}
class Message(Base):
ID_KEY = "id"
TEXT_KEY = "text"
TYPE_KEY = "type"
SENT_AT_KEY = "sent_at"
CHAT_KEY = "chat"
ACTION_KEY = "action"
IS_VISIBLE_KEY = "is_visible"
TYPE_ASSISTANT = "Assistant"
TYPE_HUMAN = "Human"
TYPE_QNA = "QnA"
TYPE_ACTION = "Action"
MESSAGE_TYPE_LIST = [TYPE_ASSISTANT, TYPE_HUMAN]
MESSAGE_CATEGORY = [TYPE_QNA, TYPE_ACTION]
__tablename__ = "messages"
id = Column(String(32), primary_key=True)
content = Column("text", Text, nullable=False)
type = Column(Enum(*MESSAGE_TYPE_LIST, name="message_type_enum", create_type=False), nullable=False)
is_visible = Column(Boolean, nullable=False, default=True)
sent_at = Column(TIMESTAMP(timezone=False), default=text('now()'), nullable=False)
msg_category = Column(Enum(*MESSAGE_CATEGORY, name="message_category"))
json_metadata = Column("json_metadata", JSONB, nullable=True)
chat_id = Column(String(32), ForeignKey('chats.id', ondelete='CASCADE'))
action_id = Column(String(32), ForeignKey('actions.id', ondelete='CASCADE'), nullable=True)
chat = relationship("Chat", back_populates="messages")
action = relationship("Action", back_populates="messages")
def __init__(self, content, msg_type, msg_category=None, is_visible=True):
self.id = str(uuid.uuid4().hex)
self.content = content
self.type = msg_type
self.msg_category = msg_category
self.is_visible = is_visible
def to_reference_json(self):
return {
self.ID_KEY: self.id,
self.TEXT_KEY: self.content,
self.TYPE_KEY: self.type,
self.IS_VISIBLE_KEY: self.is_visible,
self.SENT_AT_KEY: self.sent_at.strftime(CREATED_AT_FORMAT_WITH_MILLI_SECONDS),
}
def to_json(self):
return {
self.ID_KEY: self.id,
self.TEXT_KEY: self.content,
self.TYPE_KEY: self.type,
self.IS_VISIBLE_KEY: self.is_visible,
self.SENT_AT_KEY: self.sent_at.strftime(CREATED_AT_FORMAT_WITH_MILLI_SECONDS),
self.ACTION_KEY: self.action.to_json() if self.action else None
}
class Action(Base):
ID_KEY = "id"
NAME_KEY = "name"
MESSAGES_KEY = "messages"
METADATA_KEY = "metadata"
CREATED_AT_KEY = "created_at"
__tablename__ = "actions"
id = Column(String(32), primary_key=True)
name = Column(Text, nullable=False)
json_metadata = Column("json_metadata", JSONB, nullable=True)
created_at = Column(TIMESTAMP(timezone=False), default=text('now()'), nullable=False)
messages = relationship('Message', passive_deletes=True, back_populates='action', cascade="all, delete-orphan",
order_by="Message.sent_at", lazy='selectin')
def __init__(self, name, metadata):
self.id = str(uuid.uuid4().hex)
self.json_metadata = metadata
self.name = name
def to_reference_json(self):
return {
self.ID_KEY: self.id,
self.NAME_KEY: self.name,
}
def to_json(self):
return {
self.ID_KEY: self.id,
self.NAME_KEY: self.name,
self.METADATA_KEY: self.json_metadata,
self.MESSAGES_KEY: [message.to_reference_json() for message in self.messages],
}
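# Minimal relationship sketch (illustrative titles and content; committing through the app's
# async session is assumed to happen elsewhere). Appending to chat.messages also sets
# message.chat via the back_populates pairing.
if __name__ == "__main__":
    chat = Chat(title="Inventory questions", chat_type=Chat.TYPE_QNA)
    message = Message(content="List my VPCs", msg_type=Message.TYPE_HUMAN, msg_category=Message.TYPE_QNA)
    chat.messages.append(message)
    print(chat.to_reference_json(), message.chat is chat)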
|
CloudWhisperCustomBot | app/models/profile.py | import uuid
from sqlalchemy import Column, Enum
from sqlalchemy.orm import relationship
from sqlalchemy.sql.sqltypes import Boolean, String, TIMESTAMP, JSON
from sqlalchemy.dialects.postgresql import JSONB
from app.models.base import Base
from app.web.profiles.schemas import OnboardingStatus
class Profile(Base):
ID_KEY = "id"
USER_ID_KEY = "user_id"
PROJECT_ID_KEY = "project_id"
NAME_KEY = "name"
EMAIL_KEY = "email"
IS_ADMIN_KEY = "is_admin"
API_KEY_STATUS = "api_key_status"
LAST_UPDATED_AT_KEY = "last_updated_at"
APPEARANCE_KEY = "appearance"
# CREDENTIAL_KEY = "credential"
SETTINGS_KEY = "settings"
ON_BOARDED_KEY = "on_boarded"
POLICY_AGREED_KEY = "policy_agreed"
PROVIDER_KEY = "provider"
PAYMENT_KEY = "payment"
CHATS_KEY = "chats"
BOTS_KEY = "bots"
ONBOARDING_KEY = "onboarding"
__tablename__ = "profiles"
id = Column(String(32), primary_key=True)
user_id = Column(String(36), nullable=False)
project_id = Column(String(), nullable=True)
name = Column(String(50), nullable=False)
email = Column(String(255), nullable=False)
is_admin = Column(Boolean, nullable=False, default=False)
api_key = Column(JSONB, nullable=True)
api_key_status = Column(String(36), nullable=True)
appearance = Column(JSON, default={})
last_updated_at = Column(TIMESTAMP(timezone=True), nullable=True)
onboarding = Column(Enum(OnboardingStatus), nullable=False, default=OnboardingStatus.app_tour)
chats = relationship('Chat', passive_deletes=True, backref='profile', cascade="all, delete-orphan")
activity_trackings = relationship('ActivityTracking', passive_deletes=True, backref='profile',
cascade="all, delete-orphan")
def __init__(
self, name, email, user_id, project_id=None, is_admin=False, appearance=None, onboarding=OnboardingStatus.app_tour):
self.id = str(uuid.uuid4().hex)
self.name = name
self.user_id = user_id
self.project_id = project_id
self.email = email
self.is_admin = is_admin
        self.appearance = appearance if appearance else {}
        self.onboarding = onboarding
def to_reference_json(self):
return {
self.ID_KEY: self.id,
self.NAME_KEY: self.name,
self.EMAIL_KEY: self.email,
self.ONBOARDING_KEY: self.onboarding
}
def to_json(self):
return {
self.ID_KEY: self.id,
self.NAME_KEY: self.name,
self.EMAIL_KEY: self.email,
self.USER_ID_KEY: self.user_id if self.user_id else None,
self.PROJECT_ID_KEY: self.project_id if self.project_id else None,
self.IS_ADMIN_KEY: self.is_admin,
self.CHATS_KEY: [chat.to_reference_json() for chat in self.chats],
self.APPEARANCE_KEY: self.appearance if self.appearance else None,
self.ONBOARDING_KEY: self.onboarding
}
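# Minimal serialization sketch (illustrative values; persisting the profile with the app's
# async session is assumed to happen elsewhere).
if __name__ == "__main__":
    profile = Profile(name="Jane Doe", email="jane@example.com", user_id="c" * 36)
    print(profile.to_reference_json())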
|
CloudWhisperCustomBot | app/models/base.py | from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
|
CloudWhisperCustomBot | app/api_discovery/delete_redundant_node_task.py | from app.core.config import neo4j_driver as driver
def delete_redundant_nodes(user_id, timestamp):
    """Remove this user's nodes that were not refreshed in the latest discovery run."""
    with driver.session() as session:
        # Use query parameters instead of string interpolation to avoid Cypher injection.
        query = "MATCH (n) WHERE n.discovered_at < $timestamp AND n.user_id = $user_id DETACH DELETE n"
        session.write_transaction(lambda tx: tx.run(query, timestamp=timestamp, user_id=user_id))
    return
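# Minimal usage sketch (illustrative values; assumes discovered_at holds the same epoch-style
# timestamp format written by the discovery task).
if __name__ == "__main__":
    import time
    delete_redundant_nodes(user_id="example-user-id", timestamp=int(time.time()) - 3600)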
|
CloudWhisperCustomBot | app/api_discovery/classic_api_schema.json | {
"v1/softlayer/accounts": {
"id=softlayer_cloud_id": {
"v1/softlayer/discover": {
"/pid=root_id+associated_tasks[*].id=task_id": [
"v1/ibm/workflows/{root_id}/tasks/{task_id}"
]
}
}
}
} |
CloudWhisperCustomBot | app/api_discovery/neo4j_query.py | import anthropic
import openai
from loguru import logger
from neo4j import GraphDatabase
history = []
node_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "node"
WITH label AS nodeLabels,
collect(CASE WHEN property <> 'user_id' THEN property END) AS properties
RETURN {labels: nodeLabels, properties: [p IN properties WHERE p IS NOT NULL]} AS output
"""
node_names_query = """
MATCH (n) RETURN n.name
"""
rel_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "relationship"
WITH label AS nodeLabels, collect(property) AS properties
RETURN {type: nodeLabels, properties: properties} AS output
"""
rel_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE type = "RELATIONSHIP" AND elementType = "node"
RETURN {source: label, relationship: property, target: other} AS output
"""
label_query = """
CALL db.labels() YIELD label
WHERE label CONTAINS '{user_id}'
RETURN label;
"""
def schema_text(node_props, rel_props, rels, user_id):
prompt = f"""
This is the schema representation of the Neo4j database.
Node properties are the following:
{node_props}
Relationship properties are the following:
{rel_props}
Relationship point from source to target nodes
{rels}
Make sure to respect relationship types and directions
"""
return prompt.replace(f'_{user_id}', '').replace('user_id', '')
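# Minimal standalone sketch (illustrative labels and user id) of the label-scoping technique
# used by Neo4jGPTQuery.add_user_id below: every bare node label in a generated Cypher query
# is suffixed with the user id so the query only touches that user's subgraph.
if __name__ == "__main__":
    import re
    _cypher = "MATCH (v:VPC)-[:HAS_SUBNET]-(s:Subnet) RETURN v.name, s.name"
    for _label in ["VPC", "Subnet"]:
        _cypher = re.sub(f":{_label}\\b", f":{_label}_u123", _cypher)
    print(_cypher)  # MATCH (v:VPC_u123)-[:HAS_SUBNET]-(s:Subnet_u123) RETURN v.name, s.name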
class Neo4jGPTQuery:
def __init__(self, url, user, password, openai_api_key, action, user_id):
self.driver = GraphDatabase.driver(url, auth=(user, password))
openai.api_key = openai_api_key
# construct schema
self.retry = 0
self.schema = self.generate_schema(user_id)
self.action = action
self.user_id = user_id
self.labels = self.get_labels()
self.messages = list()
def get_labels(self):
# logger.info(self.user_id)
# logger.info(label_query.format(user_id=self.user_id))
labels_response = self.query_database(label_query.format(user_id=self.user_id))
# logger.info(f"Labels response: {labels_response}")
labels_response = list({label[0].replace(f'_{self.user_id}', '') for label in labels_response})
return labels_response
def add_user_id(self, cypher):
import re
for label in self.labels:
if f'{label}_{self.user_id}' in cypher:
continue
pattern = f':{label}\\b'
replacement = f':{label}_{self.user_id}'
cypher = re.sub(pattern, replacement, cypher)
logger.info('<<<<add use is cypher query>>>>>')
logger.info(cypher)
return cypher
def generate_schema(self, user_id):
node_props = self.query_database(node_properties_query)
rel_props = self.query_database(rel_properties_query)
rels = self.query_database(rel_query)
return schema_text(node_props, rel_props, rels, user_id=user_id)
def refresh_schema(self):
self.schema = self.generate_schema(user_id=self.user_id)
def get_system_message(self):
        return f"""You are a helpful Neo4j assistant who generates intelligent Cypher queries to help the user find information from a Neo4j graph database based on the provided schema definition, without giving any explanation.
<instructions>Strictly follow these instructions:
1. Use only the provided relationship types and properties.
2. Always query across all properties for the given search term using the 'any' key.
3. Do not include any explanations or apologies in your responses.
5. Make sure to return id and names in all queries
6. Provide a clear explanation of the relationships between the nodes:
- Identify the starting node and the target node based on the user's question.
- Trace the path from the starting node to the target node, considering all the relationships and intermediate nodes along the way.
- If multiple relationship paths exist between nodes, select the shortest path.
- Explain each relationship and its direction, highlighting any intermediate nodes encountered.
- Choose direct relationship instead of intermediate nodes
- Summarize the overall path and the connections between the nodes
7. Retrieve only information asked in query. Avoid retrieving extra info</instructions>
<cypher_techniques>How to write a query:
A. If no specific param is mentioned, start with a simple query.
B. If a specific attribute is mentioned, use regex or fuzzy matching instead of exact matches to search for values even if some characters are missing or if there are typos in the search query. In Neo4j, you can achieve this by using techniques like:
a. Regex matching for more flexible pattern searches:
<example>WHERE ANY (x IN [prop IN n WHERE toString(prop) =~ '(?i).searchTerm.'] WHERE x)</example>
b. Similarity algorithms (e.g., Levenshtein distance)
<example>WHERE ANY (x IN [prop IN n WHERE toString(prop)] WHERE apoc.text.levenshteinSimilarity(x, 'searchTerm') > 0.8)</example>
C. If a property value is an array, you must handle it appropriately:
a. If you want to check if any element in the array matches the search term, use:
<example>WHERE ANY (x IN [prop IN n WHERE toString(prop)] WHERE x =~ '(?i).searchTerm.')</example>
b. If you want to check if the entire array matches the search term, convert it to a string first:
<example>WHERE apoc.text.levenshteinSimilarity(toString([prop IN n WHERE toString(prop)]), 'searchTerm') > 0.5</example>
c. If you want to check if a specific element in the array matches the search term, access it by index:
<example>WHERE apoc.text.levenshteinSimilarity([prop IN n WHERE toString(prop)][index], 'searchTerm') > 0.5</example>
D. You can Use shortestPath to find connections between two main nodes if nodes are not directly:
<example>
MATCH p=shortestPath((n1:Node1)-[*]-(n2:Node2))
WHERE ANY(x IN [n1, n2] WHERE ANY(y IN [n1.prop1, n1.prop2, ...] WHERE toString(y) =~ '(?i).*searchTerm.*'))
WITH n1, n2
RETURN n1 | {{.id, .prop1, .prop2, ...}} as startNode,
n2 {{.id, .prop1, .prop2, ...}} as endNode,
pathNodes, pathRels
</example></cypher_techniques>
<database_schema>
I'm providing the database schema to help you understand what data exists in the database, enabling you to intelligently retrieve information from it.
{self.schema}
</database_schema>
Strictly provide output in the following format:
Nodes: [Identify the nodes required to find out information for the question]
Relationships: [Critically dig out relationships between relevant nodes step-by-step from provided database schema without hallucinating.]
Explanation: [Provide a clear explanation of how the nodes are connected, including any intermediate nodes and the direction of the relationships]
Shortest_Path: [If you think the shortest path algorithm is required, then plan out efficiently to determine nodes and relationships]
Thought: [think step by step to construct query using information in previous steps]
Cypher: [Generate the Cypher query]
Enhanced Cypher: [Enhance previous query by searching through all properties everywhere and use Regex/levenshteinSimilarity/phonetic/metaphone]"""
def query_database(self, neo4j_query, params={}):
#######
if '()' not in neo4j_query:
neo4j_query = self.add_user_id(neo4j_query)
with self.driver.session() as session:
result = session.run(neo4j_query, params)
output = [r.values() for r in result]
if output:
output.insert(0, result.keys())
return output
else:
return None
def construct_cypher(self, question, client, history, is_depth=False):
if is_depth:
prompt = self.get_depth_system_message()
model = "claude-3-sonnet-20240229"
else:
prompt = self.get_system_message()
model = "claude-3-haiku-20240307"
messages = [
{"role": "user", "content": f'\n Question:{question}'}
]
# Used for Cypher healing flows
if history:
messages.extend(history)
message = client.messages.create(
system=prompt,
model=model,
max_tokens=1024,
messages=messages,
temperature=0.0
)
return message.content[0].text
def generate_cypher(self, question):
self.messages.append({'role': 'user', 'content': question})
        # The Anthropic client reads ANTHROPIC_API_KEY from the environment by default;
        # never hard-code API keys in source.
        client = anthropic.Anthropic()
completion2 = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=4000,
system=self.get_system_message(),
messages=self.messages,
temperature=0.0
)
self.messages.append({'role': 'assistant', 'content': completion2.content[0].text})
response = completion2.content[0].text
if '->' in response:
response = response.replace('->', '-')
if '<-' in response:
response = response.replace('<-', '-')
if "```cypher" in response:
response = response.replace("```cypher", "")
if "```" in response:
response = response.replace("```", "")
cypher = ''
enhanced_cypher = ''
if 'Thought' in response or 'Cypher:' in response:
if 'Enhanced Cypher:' in response:
enhanced_cypher = response.split('Enhanced Cypher:')[1]
response = response.split('Enhanced Cypher:')[0]
logger.info(f"Enhanced Cypher: {enhanced_cypher}")
if 'Cypher:' in response:
cypher = response.split('Cypher:')[1] if 'Cypher:' in response else response
logger.info('<<<<<printing cypher>>>>')
logger.info(cypher)
logger.info('<<<<<>>>>')
return cypher, enhanced_cypher
def run(self, question):
logger.info(f"QUESTION-->{question}")
if self.retry > 5:
return 'empty', 'empty'
cypher, enhanced_cypher = self.generate_cypher(question=question)
if not (cypher or enhanced_cypher):
feedback = "Internal feedback: Please review your format and follow the output format according to \
the prompt so its easy to parse data"
self.retry = self.retry + 1
return self.run(question=feedback)
try:
query_response = ''
if cypher:
query_response = self.query_database(cypher)
logger.info("<<<<<<<response cypher>>>>>>")
logger.info(query_response)
if not query_response:
cypher = enhanced_cypher
query_response = self.query_database(cypher)
logger.info("<<<<<<response cypher of enhance>>>>>>")
logger.info(query_response)
if not query_response: # Ask llm to retry
self.retry = self.retry + 1
logger.info('<<<<<<into retrying>>>>>')
feedback = """\
This query returned empty response. First verify relationship as you may have made a query with wrong \
relationship between nodes. If relationship is correct then add \
following things to query \n\n -levenshteinSimilarity \n -search through all properties.\n\n. \
If required you can rethink as well. Don't generate any explanation"""
return self.run(question=feedback)
if not query_response:
query_response = 'empty'
except Exception as e:
feedback = f"""This query returns an error: {str(e)}
Give me a improved query that works without any explanations or apologies"""
logger.info(feedback)
self.retry = self.retry + 1
return self.run(question=feedback)
return query_response, cypher
if __name__ == "__main__":
query = "What is the profile of the VSI associated with the backup '2c2cd8bdebdb4236a8fd54ec395f14c4'?"
    import os
    openai_key = os.environ.get("OPENAI_API_KEY", "")  # never commit real API keys
gds_db = Neo4jGPTQuery(
url="bolt://localhost:7687",
user="neo4j",
password="72054321",
openai_api_key=openai_key,
action="Restore VSI backup",
user_id="f572286e4b2a4c16bd9a98111dca21a8"
)
print(gds_db.schema)
# output = gds_db.assess_tool(question=query, client=client)
# print(output)
# output_parse = output.split("Tool:")
# print(output_parse)
# if output_parse[1].strip(' ').startswith("Simple_Search"):
# print(gds_db.run(question=query, history=[], client=client))
# if output_parse[1].strip(' ').startswith("Depth_Search"):
# print('here')
print(gds_db.run(question=query))
|
CloudWhisperCustomBot | app/api_discovery/node_rel.json | {
"v1/ibm/vpcs": {
"Nodes": [
"VPC",
"Acl",
"AddressPrefix",
"VirtualServerInstanceGroup",
"VirtualServerInstance",
"KubernetesCluster",
"LoadBalancer",
"NetworkAcl",
"PublicGateway",
"RoutingTable",
"SecurityGroup",
"Subnet",
"Tag",
"VpnGateway",
"IBMCloud",
"Region",
"ResourceGroup",
"Zone",
"Image",
"Profile"
],
"Relationships": [
{
"source": "VPC",
"target": "Acl",
"relationship": "HAS_ACL"
},
{
"source": "VPC",
"target": "AddressPrefix",
"relationship": "HAS_ADDRESS_PREFIX"
},
{
"source": "AddressPrefix",
"target": "Zone",
"relationship": "LOCATED_IN"
},
{
"source": "VPC",
"target": "VirtualServerInstanceGroup",
"relationship": "HAS_INSTANCE_GROUP"
},
{
"source": "VPC",
"target": "VirtualServerInstance",
"relationship": "HAS_INSTANCE"
},
{
"source": "VirtualServerInstance",
"target": "Zone",
"relationship": "LOCATED_IN"
},
{
"source": "VirtualServerInstance",
"target": "Image",
"relationship": "HAS_IMAGE"
},
{
"source": "VirtualServerInstance",
"target": "Profile",
"relationship": "HAS_PROFILE"
},
{
"source": "VPC",
"target": "KubernetesCluster",
"relationship": "HAS_KUBERNETES_CLUSTER"
},
{
"source": "VPC",
"target": "LoadBalancer",
"relationship": "HAS_LOAD_BALANCER"
},
{
"source": "VPC",
"target": "NetworkACL",
"relationship": "HAS_NETWORK_ACL"
},
{
"source": "VPC",
"target": "PublicGateway",
"relationship": "HAS_PUBLIC_GATEWAY"
},
{
"source": "PublicGateway",
"target": "Subnet",
"relationship": "HAS_SUBNET"
},
{
"source": "VPC",
"target": "RoutingTable",
"relationship": "HAS_ROUTING_TABLE"
},
{
"source": "VPC",
"target": "SecurityGroup",
"relationship": "HAS_SECURITY_GROUP"
},
{
"source": "SecurityGroup",
"target": "ResourceGroup",
"relationship": "HAS_RESOURCE_GROUP"
},
{
"source": "VPC",
"target": "Subnet",
"relationship": "HAS_SUBNET"
},
{
"source": "VPC",
"target": "Tag",
"relationship": "HAS_TAG"
},
{
"source": "VPC",
"target": "VPNGateway",
"relationship": "HAS_VPN_GATEWAY"
},
{
"source": "VPC",
"target": "Region",
"relationship": "LOCATED_IN"
},
{
"source": "VPC",
"target": "ResourceGroup",
"relationship": "PART_OF"
},
{
"source": "VPC",
"target": "IBMCloud",
"relationship": "PART_OF"
},
{
"source": "Subnet",
"target": "Zone",
"relationship": "LOCATED_IN"
},
{
"source": "Subnet",
"target": "AddressPrefix",
"relationship": "USES_ADDRESS_PREFIX"
},
{
"source": "Subnet",
"target": "PublicGateway",
"relationship": "CONNECTED_TO_PUBLIC_GATEWAY"
},
{
"source": "VpnGateway",
"target": "Subnet",
"relationship": "CONNECTED_TO_SUBNET"
},
{
"source": "Tag",
"target": "ResourceGroup",
"relationship": "TAGGED_RESOURCE_GROUP"
},
{
"source": "LoadBalancer",
"target": "Subnet",
"relationship": "ASSOCIATED_WITH_SUBNET"
}
],
"Location": {
"VPC": "items[*]",
"Acl": "items[*].associated_resources.network_acls[*]",
"AddressPrefix": "items[*].associated_resources.address_prefixes[*]",
"VirtualServerInstanceGroup": "items[*].associated_resources.instance_groups[*]",
"VirtualServerInstance": "items[*].associated_resources.instances[*]",
"Image": "items[*].associated_resources.instances[*].image",
"Profile": "items[*].associated_resources.instances[*].profile",
"KubernetesCluster": "items[*].associated_resources.kubernetes_clusters[*]",
"LoadBalancer": "items[*].associated_resources.load_balancers[*]",
"NetworkAcl": "items[*].associated_resources.network_acls[*]",
"PublicGateway": "items[*].associated_resources.public_gateways[*]",
"RoutingTable": "items[*].associated_resources.routing_tables[*]",
"SecurityGroup": "items[*].associated_resources.security_groups[*]",
"Subnet": {
"VPC": "items[*].associated_resources.subnets[*]",
"PublicGateway": "items[*].associated_resources.public_gateways[*].subnets[*]"
},
"Tag": "items[*].associated_resources.tags[*]",
"VpnGateway": "items[*].associated_resources.vpn_gateways[*]",
"IBMCloud": "items[*].ibm_cloud",
"Region": "items[*].region",
"ResourceGroup": {
"VPC": "items[*].resource_group",
"SecurityGroup": "items[*].associated_resources.security_groups[*].resource_group"
},
"Zone": {
"Subnet": "items[*].associated_resources.subnets[*].zone",
"VirtualServerInstance": "items[*].associated_resources.instances[*].zone",
"PublicGateway": "items[*].associated_resources.public_gateways[*].zone",
"AddressPrefix": "items[*].associated_resources.address_prefixes[*].zone",
"Image": "items[*].associated_resources.instance[*].image.zones[*]"
}
},
"Attribute": {
"VPC": [
"id",
"name",
"status",
"classic_access",
"created_at"
],
"Zone": [
"id",
"name",
"display_name"
],
"Acl": [
"id",
"is_default",
"name"
],
"AddressPrefix": [
"cidr",
"id",
"is_default",
"name"
],
"VirtualServerInstanceGroup": [
"id",
"name"
],
"VirtualServerInstance": [
"id",
"name",
"status",
"usage",
"primary_network_interface"
],
"Image": [
"id",
"name",
"os_vendor",
"display_name",
"architecture"
],
"Profile": [
"display_name",
"family",
"id",
"name"
],
"KubernetesCluster": [
"cluster_type",
"id",
"master_kube_version",
"name"
],
"LoadBalancer": [
"hostname",
"id",
"name"
],
"NetworkAcl": [
"id",
"is_default",
"name"
],
"PublicGateway": [
"id",
"name"
],
"RoutingTable": [
"id",
"name"
],
"SecurityGroup": [
"id",
"name"
],
"Subnet": [
"id",
"ipv4_cidr_block",
"name"
],
"Tag": [
"id",
"tag_name"
],
"VpnGateway": [
"id",
"name"
],
"IBMCloud": [
"id",
"name"
],
"Region": [
"display_name",
"id",
"name"
],
"ResourceGroup": [
"id",
"name"
],
"TTL": [
"expires_at",
"id"
]
}
},
"v1/ibm/clouds": {
"Nodes": [
"IBMCloud",
"Region",
"Zone",
"Settings"
],
"Relationships": [
{
"source": "IBMCloud",
"target": "Region",
"relationship": "HAS_REGION"
},
{
"source": "Region",
"target": "Zone",
"relationship": "HAS_ZONE"
},
{
"source": "IBMCloud",
"target": "Settings",
"relationship": "HAS_SETTINGS"
}
],
"Location": {
"IBMCloud": "items[*]",
"Region": "items[*].monitoring_tokens[*].region",
"Zone": "items[*].monitoring_tokens[*].region.zones[*]",
"Settings": "items[*].settings"
},
"Attribute": {
"IBMCloud": [
"deleted",
"id",
"name",
"status",
"sync_status",
"total_cost",
"user_id",
"monitoring_tokens"
],
"Region": [
"display_name",
"id",
"name"
],
"Zone": [
"display_name",
"id",
"name",
"status"
],
"Settings": [
"cloud_id",
"cost_optimization_enabled",
"id"
]
}
},
"v1/ibm/geography/regions": {
"Nodes": [
"Region",
"Zone",
"IBMCloud"
],
"Relationships": [
{
"source": "Region",
"target": "Zone",
"relationship": "HAS_ZONE"
},
{
"source": "IBMCloud",
"target": "Region",
"relationship": "HAS_REGION"
}
],
"Location": {
"Region": "items[*]",
"Zone": "items[*].zones[*]",
"IBMCloud": "items[*].ibm_cloud"
},
"Attribute": {
"Region": [
"display_name",
"id",
"name",
"status"
],
"Zone": [
"display_name",
"id",
"name",
"status"
],
"IBMCloud": [
"id",
"name"
]
}
},
"v1/ibm/cloud_object_storages/buckets": {
"Nodes": [
"COSBucket",
"IBMCloud",
"Region",
"CloudObjectStorageInstance",
"COSBucketVersioning"
],
"Relationships": [
{
"source": "IBMCloud",
"target": "COSBucket",
"relationship": "HAS"
},
{
"source": "COSBucket",
"target": "Region",
"relationship": "LOCATED_IN"
},
{
"source": "CloudObjectStorageInstance",
"target": "COSBucket",
"relationship": "CONTAINS"
},
{
"source": "COSBucket",
"target": "COSBucketVersioning",
"relationship": "HAS_VERSIONING"
}
],
"Location": {
"COSBucket": "items[*]",
"IBMCloud": "items[*].ibm_cloud",
"Region": "items[*].regions[*]",
"CloudObjectStorageInstance": "items[*].cloud_object_storage",
"COSBucketVersioning": "items[*].cos_bucket_versioning"
},
"Attribute": {
"IBMCloud": [
"id",
"name"
],
"Region": [
"id",
"name",
"display_name"
],
"CloudObjectStorageInstance": [
"id",
"name"
],
"COSBucket": [
"id",
"name",
"created_at",
"location_constraint",
"resiliency",
"size",
"total_objects",
"type"
],
"COSBucketVersioning": [
"id",
"mfa_delete",
"objects",
"size",
"status",
"total_objects"
]
}
},
"v1/ibm/cloud_object_storages": {
"Nodes": [
"CloudObjectStorageInstance",
"IBMCloud"
],
"Relationships": [
{
"source": "IBMCloud",
"target": "CloudObjectStorageInstance",
"relationship": "HAS"
}
],
"Location": {
"CloudObjectStorageInstance": "items[*]",
"IBMCloud": "items[*].ibm_cloud"
},
"Attribute": {
"IBMCloud": [
"id",
"name"
],
"CloudObjectStorageInstance": [
"created_at",
"crn",
"id",
"locked",
"migrated",
"name",
"updated_at"
]
}
},
"v1/ibm/kubernetes_clusters": {
"Nodes": [
"KubernetesCluster",
"VPC",
"IBMCloud",
"Region",
"ResourceGroup",
"WorkerPool",
"Zone",
"Subnet"
],
"Relationships": [
{
"source": "KubernetesCluster",
"target": "VPC",
"relationship": "ASSOCIATED_WITH"
},
{
"source": "KubernetesCluster",
"target": "IBMCloud",
"relationship": "BELONGS_TO"
},
{
"source": "KubernetesCluster",
"target": "Region",
"relationship": "LOCATED_IN"
},
{
"source": "KubernetesCluster",
"target": "ResourceGroup",
"relationship": "PART_OF"
},
{
"source": "WorkerPool",
"target": "KubernetesCluster",
"relationship": "PART_OF"
},
{
"source": "WorkerPool",
"target": "Zone",
"relationship": "LOCATED_IN"
},
{
"source": "Subnet",
"target": "Zone",
"relationship": "LOCATED_IN"
},
{
"source": "WorkerPool",
"target": "Subnet",
"relationship": "CONTAINS"
}
],
"Location": {
"KubernetesCluster": "items[*]",
"VPC": "items[*].associated_resources.vpc",
"IBMCloud": "items[*].ibm_cloud",
"Region": "items[*].region",
"ResourceGroup": "items[*].resource_group",
"WorkerPool": "items[*].worker_pools[*]",
"Zone": {
"WorkerPool": "items[*].worker_pools[*].worker_zones[*]",
"Subnet": "items[*].worker_pools[*].worker_zones[*].subnets[*].zone"
},
"Subnet": "items[*].worker_pools[*].worker_zones[*].subnets[*]"
},
"Attribute": {
"KubernetesCluster": [
"id",
"name",
"cluster_type",
"disable_public_service_endpoint",
"master_kube_version",
"pod_subnet",
"provider",
"status",
"workloads",
"service_endpoint",
"ingress"
],
"VPC": [
"id",
"name"
],
"IBMCloud": [
"id",
"name"
],
"Ingress": [
"hostname",
"message",
"secret_name",
"status"
],
"Region": [
"id",
"name",
"display_name"
],
"ResourceGroup": [
"id",
"name"
],
"WorkerPool": [
"id",
"name",
"disk_encryption",
"flavor",
"resource_id",
"status",
"worker_count"
],
"Zone": [
"id",
"name",
"private_vlan"
],
"Subnet": [
"id",
"ipv4_cidr_block",
"name"
],
"ServiceEndpoint": [
"private_service_endpoint_enabled",
"private_service_endpoint_url",
"public_service_endpoint_enabled",
"public_service_endpoint_url"
]
}
},
"v1/ibm/cloud_object_storages/keys": {
"Nodes": [
"COSAccessKey",
"IBMCloud",
"CloudObjectStorageInstance"
],
"Relationships": [
{
"source": "IBMCloud",
"target": "CloudObjectStorageInstance",
"relationship": "HAS"
},
{
"source": "CloudObjectStorageInstance",
"target": "COSAccessKey",
"relationship": "HAS"
}
],
"Location": {
"COSAccessKey": "items[*]",
"IBMCloud": "items[*].ibm_cloud",
"CloudObjectStorageInstance": "items[*].cloud_object_storage"
},
"Attribute": {
"IBMCloud": [
"id",
"name"
],
"CloudObjectStorageInstance": [
"id",
"name"
],
"COSAccessKey": [
"id",
"name",
"created_at",
"updated_at",
"iam_role_crn",
"iam_service_id_crn",
"is_hmac",
"migrated",
"role",
"state"
]
}
},
"v1/ibm/instances": {
"Nodes": [
"VirtualServerInstance",
"BootVolumeAttachment",
"Volume",
"Profile",
"Zone",
"SSHKey",
"NetworkInterface",
"DedicatedHost",
"DedicatedHostGroup",
"PlacementGroup",
"PrimaryNetworkInterface",
"VolumeAttachment",
"Disk",
"IBMCloud",
"Image",
"Region",
"ResourceGroup",
"Tag",
"VPC"
],
"Relationships": [
{
"source": "VirtualServerInstance",
"target": "BootVolumeAttachment",
"relationship": "HAS_BOOT_VOLUME_ATTACHMENT"
},
{
"source": "BootVolumeAttachment",
"target": "Volume",
"relationship": "USES_VOLUME"
},
{
"source": "Volume",
"target": "Profile",
"relationship": "HAS_PROFILE"
},
{
"source": "VirtualServerInstance",
"target": "Profile",
"relationship": "HAS_PROFILE"
},
{
"source": "Volume",
"target": "Zone",
"relationship": "LOCATED_IN_ZONE"
},
{
"source": "VirtualServerInstance",
"target": "SSHKey",
"relationship": "USES_SSH_KEY"
},
{
"source": "VirtualServerInstance",
"target": "NetworkInterface",
"relationship": "HAS_NETWORK_INTERFACE"
},
{
"source": "VirtualServerInstance",
"target": "DedicatedHost",
"relationship": "PLACED_ON_DEDICATED_HOST"
},
{
"source": "VirtualServerInstance",
"target": "DedicatedHostGroup",
"relationship": "PLACED_IN_DEDICATED_HOST_GROUP"
},
{
"source": "VirtualServerInstance",
"target": "PlacementGroup",
"relationship": "PLACED_IN_PLACEMENT_GROUP"
},
{
"source": "VirtualServerInstance",
"target": "PrimaryNetworkInterface",
"relationship": "HAS_PRIMARY_NETWORK_INTERFACE"
},
{
"source": "VirtualServerInstance",
"target": "VolumeAttachment",
"relationship": "HAS_VOLUME_ATTACHMENT"
},
{
"source": "VolumeAttachment",
"target": "Volume",
"relationship": "USES_VOLUME"
},
{
"source": "VirtualServerInstance",
"target": "Disk",
"relationship": "HAS_DISK"
},
{
"source": "VirtualServerInstance",
"target": "IBMCloud",
"relationship": "BELONGS_TO_IBM_CLOUD"
},
{
"source": "VirtualServerInstance",
"target": "Image",
"relationship": "USES_IMAGE"
},
{
"source": "Image",
"target": "Zone",
"relationship": "LOCATED_IN_ZONE"
},
{
"source": "VirtualServerInstance",
"target": "Region",
"relationship": "LOCATED_IN_REGION"
},
{
"source": "VirtualServerInstance",
"target": "ResourceGroup",
"relationship": "BELONGS_TO_RESOURCE_GROUP"
},
{
"source": "VirtualServerInstance",
"target": "Tag",
"relationship": "HAS_TAG"
},
{
"source": "VirtualServerInstance",
"target": "VPC",
"relationship": "BELONGS_TO_VPC"
},
{
"source": "VirtualServerInstance",
"target": "Zone",
"relationship": "LOCATED_IN_ZONE"
}
],
"Location": {
"VirtualServerInstance": "items[*]",
"BootVolumeAttachment": "items[*].associated_resources.boot_volume_attachment",
"Volume": {
"boot_volume_attachment": "items[*].associated_resources.boot_volume_attachment.volume",
"VolumeAttachment": "items[*].associated_resources.volume_attachments[*].volume"
},
"Profile": {
"Volume": "items[*].associated_resources.boot_volume_attachment.volume.profile",
"VirtualServerInstance": "items[*].profile"
},
"Zone": {
"Volume": "items[*].associated_resources.boot_volume_attachment.volume.zone",
"VirtualServerInstance": "items[*].zone",
"Image": "items[*].image.zones[*]",
"Profile": "items[*].profile.zones[*]"
},
"SSHKey": "items[*].associated_resources.keys[*]",
"NetworkInterface": "items[*].associated_resources.network_interfaces[*]",
"DedicatedHost": "items[*].associated_resources.placement_target.dedicated_host",
"DedicatedHostGroup": "items[*].associated_resources.placement_target.dedicated_host_group",
"PlacementGroup": "items[*].associated_resources.placement_target.placement_group",
"PrimaryNetworkInterface": "items[*].associated_resources.primary_network_interface",
"VolumeAttachment": "items[*].associated_resources.volume_attachments[*]",
"Disk": "items[*].disks[*]",
"IBMCloud": "items[*].ibm_cloud",
"Image": "items[*].image",
"Region": "items[*].region",
"ResourceGroup": "items[*].resource_group",
"Tag": "items[*].tags[*]",
"VPC": "items[*].vpc"
},
"Attribute": {
"VirtualServerInstance": [
"bandwidth",
"created_at",
"crn",
"href",
"id",
"memory",
"name",
"resource_id",
"startable",
"status",
"status_reasons",
"usage",
"vcpu",
"volume_migration_report",
"gpu"
],
"BootVolumeAttachment": [
"delete_volume_on_instance_delete",
"id",
"name",
"type"
],
"Volume": [
"capacity",
"id",
"iops",
"name"
],
"Profile": [
"id",
"name",
"family",
"display_name"
],
"Zone": [
"display_name",
"id",
"name"
],
"SSHKey": [
"id",
"name"
],
"NetworkInterface": [
"allow_ip_spoofing",
"id",
"is_primary",
"name",
"primary_ipv4_address"
],
"DedicatedHost": [
"display_name",
"id",
"name"
],
"DedicatedHostGroup": [
"display_name",
"id",
"name"
],
"PlacementGroup": [
"display_name",
"id",
"name"
],
"PrimaryNetworkInterface": [
"allow_ip_spoofing",
"id",
"is_primary",
"name",
"primary_ipv4_address"
],
"VolumeAttachment": [
"delete_volume_on_instance_delete",
"id",
"name",
"type"
],
"Disk": [
"id",
"name"
],
"IBMCloud": [
"id",
"name"
],
"Image": [
"architecture",
"display_name",
"id",
"name",
"os_vendor"
],
"Region": [
"display_name",
"id",
"name"
],
"ResourceGroup": [
"id",
"name"
],
"Tag": [
"id",
"tag_name"
],
"VPC": [
"id",
"name"
]
}
},
"v1/ibm/backup_policies": {
"Nodes": [
"BackupPolicy",
"IBMCloud",
"Region",
"ResourceGroup",
"BackupPolicyJob",
"BackupPolicyPlan",
"VirtualServerInstanceTemplate"
],
"Relationships": [
{
"source": "IBMCloud",
"target": "BackupPolicy",
"relationship": "HAS_POLICY"
},
{
"source": "BackupPolicy",
"target": "IBMCloud",
"relationship": "PART_OF"
},
{
"source": "Region",
"target": "BackupPolicy",
"relationship": "CONTAINS"
},
{
"source": "ResourceGroup",
"target": "BackupPolicy",
"relationship": "CONTAINS"
},
{
"source": "BackupPolicy",
"target": "BackupPolicyJob",
"relationship": "HAS"
},
{
"source": "BackupPolicy",
"target": "BackupPolicyPlan",
"relationship": "HAS"
},
{
"source": "BackupPolicy",
"target": "VirtualServerInstanceTemplate",
"relationship": "HAS"
}
],
"Location": {
"BackupPolicy": "items[*]",
"IBMCloud": "items[*].ibm_cloud",
"Region": "items[*].region",
"ResourceGroup": "items[*].resource_group",
"BackupPolicyJob": "items[*].associated_resources.backup_policy_jobs[*]",
"BackupPolicyPlan": "items[*].associated_resources.backup_policy_plans[*]",
"VirtualServerInstanceTemplate": "items[*].associated_resources.instance_templates[*]"
},
"Attribute": {
"IBMCloud": [
"id",
"name"
],
"Region": [
"id",
"name",
"display_name"
],
"ResourceGroup": [
"id",
"name"
],
"BackupPolicy": [
"id",
"name",
"created_at",
"crn",
"health_reason",
"health_state",
"href",
"resource_id",
"resource_type",
"status",
"match_resource_types",
"match_user_tags"
],
"BackupPolicyJob": [
"id",
"name"
],
"BackupPolicyPlan": [
"id",
"name"
],
"VirtualServerInstanceTemplate": [
"id",
"name"
]
}
},
"v1/ibm/draas_blueprints?resource_type=IBMKubernetesCluster": {
"Nodes": [
"IKSBackupDetails",
"IKSBackup",
"KubernetesCluster",
"IBMCloud"
],
"Relationships": [
{
"source": "IKSBackupDetails",
"target": "IKSBackup",
"relationship": "HAS_BACKUP"
},
{
"source": "IKSBackupDetails",
"target": "IBMCloud",
"relationship": "LOCATED_IN"
},
{
"source": "IKSBackup",
"target": "IKSBackupDetails",
"relationship": "PART_OF"
},
{
"source": "IKSBackup",
"target": "KubernetesCluster",
"relationship": "OF"
},
{
"source": "IKSBackup",
"target": "Tag",
"relationship": "HAS_TAG"
}
],
"Location": {
"IKSBackupDetails": "items[*]",
"IKSBackup": "items[*].backups[*]",
"Tag": "items[*].backups[*].tags[*]",
"KubernetesCluster": "items[*].resource_metadata.resource_json",
"IBMCloud": "items[*].ibm_cloud"
},
"Attribute": {
"IKSBackupDetails": [
"created_at",
"description",
"id",
"name",
"next_backup_scheduled_at",
"resource_metadata",
"resource_type",
"scheduled_policy",
"resource_type",
"scheduled_policy_state",
"total_backups"
],
"IKSBackup": [
"completed_at",
"id",
"is_volume",
"name",
"objects_changed",
"scheduled",
"started_at",
"status"
],
"KubernetesCluster": [
"id"
],
"Tag": [
"id",
"name"
],
"IBMCloud": [
"id"
]
}
},
"v1/ibm/draas_blueprints?resource_type=IBMVpcNetwork": {
"Nodes": [
"VPCBackup",
"VPCBackupVersion",
"VPC",
"IBMCloud"
],
"Relationships": [
{
"source": "VPCBackup",
"target": "VPCBackupVersion",
"relationship": "HAS_VERSION"
},
{
"source": "VPCBackup",
"target": "IBMCloud",
"relationship": "LOCATED_IN"
},
{
"source": "VPCBackupVersion",
"target": "VPCBackup",
"relationship": "PART_OF"
},
{
"source": "VPCBackup",
"target": "VPC",
"relationship": "OF"
},
{
"source": "VPCBackupVersion",
"target": "Tag",
"relationship": "HAS_TAG"
}
],
"Location": {
"VPCBackup": "items[*]",
"VPCBackupVersion": "items[*].backups[*]",
"Tag": "items[*].backups[*].tags[*]",
"VPC": "items[*].resource_metadata.vpc",
"IBMCloud": "items[*].ibm_cloud"
},
"Attribute": {
"VPCBackup": [
"created_at",
"description",
"id",
"name",
"next_backup_scheduled_at",
"resource_metadata",
"resource_type",
"scheduled_policy",
"resource_type",
"scheduled_policy",
"scheduled_policy_state",
"total_backups"
],
"VPCBackupVersion": [
"backup_metadata",
"completed_at",
"id",
"is_volume",
"name",
"objects_changed",
"scheduled",
"started_at",
"status"
],
"KubernetesCluster": [
"id"
],
"Tag": [
"id",
"name"
],
"IBMCloud": [
"id"
],
"VPC": [
"id",
"name"
]
}
},
"v1/ibm/draas/cos-buckets/backups": {
"Nodes": [
"COSBucketBackup",
"COSBucketBackupDetails",
"COSBucket"
],
"Relationships": [
{
"source": "COSBucket",
"target": "COSBucketBackup",
"relationship": "HAS_BACKUP"
},
{
"source": "COSBucketBackup",
"target": "COSBucketBackupDetails",
"relationship": "PART_OF"
}
],
"Location": {
"COSBucketBackup": "items[*]",
"COSBucketBackupDetails": "items[*].draas_blueprint",
"COSBucket": "items[*].source_bucket"
},
"Attribute": {
"COSBucketBackup": [
"backup_summary",
"completed_at",
"id",
"name",
"copied_objects",
"message",
"next_backup_scheduled_at",
"objects_in_source_bucket_before_backup",
"scheduled",
"skipped_objects",
"source_bucket",
"started_at",
"status",
"tags",
"target_bucket",
"total_changed_objects",
"total_changed_size"
],
"COSBucketBackupDetails": [
"id",
"name"
],
"COSBucket": [
"id"
]
}
},
"v1/ibm/subnets": {
"Nodes": [
"Subnet",
"AddressPrefix",
"Zone",
"LoadBalancer",
"Acl",
"NetworkInterface",
"PublicGateway",
"VPC",
"VpnGateway",
"Region",
"ResourceGroup",
"RoutingTable"
],
"Relationships": [
{
"source": "AddressPrefix",
"target": "Zone",
"relationship": "RESIDES_IN"
},
{
"source": "LoadBalancer",
"target": "Vpc",
"relationship": "BELONGS_TO"
},
{
"source": "Acl",
"target": "Vpc",
"relationship": "BELONGS_TO"
},
{
"source": "NetworkInterface",
"target": "Subnet",
"relationship": "BELONGS_TO"
},
{
"source": "PublicGateway",
"target": "Vpc",
"relationship": "BELONGS_TO"
},
{
"source": "VpnGateway",
"target": "Vpc",
"relationship": "BELONGS_TO"
},
{
"source": "Vpc",
"target": "Region",
"relationship": "LOCATED_IN"
},
{
"source": "Vpc",
"target": "ResourceGroup",
"relationship": "PART_OF"
},
{
"source": "RoutingTable",
"target": "Vpc",
"relationship": "BELONGS_TO"
},
{
"source": "Subnet",
"target": "Vpc",
"relationship": "PART_OF"
},
{
"source": "Subnet",
"target": "Zone",
"relationship": "RESIDES_IN"
}
],
"Location": {
"Subnet": "items[*]",
"AddressPrefix": "items[*].associated_resources.address_prefix",
"Zone": "items[*].associated_resources.address_prefix.zone",
"LoadBalancer": "items[*].associated_resources.load_balancers",
"Acl": "items[*].associated_resources.network_acl",
"NetworkInterface": "items[*].associated_resources.network_interfaces",
"PublicGateway": "items[*].associated_resources.public_gateway",
"VPC": "items[*].associated_resources.vpc",
"VpnGateway": "items[*].associated_resources.vpn_gateways",
"Region": "items[*].region",
"ResourceGroup": "items[*].resource_group",
"RoutingTable": "items[*].routing_table"
},
"Attribute": {
"AddressPrefix": [
"cidr",
"id",
"is_default",
"name"
],
"Zone": [
"id",
"name",
"display_name"
],
"LoadBalancer": [
"hostname",
"id",
"name"
],
"NetworkAcl": [
"id",
"is_default",
"name"
],
"NetworkInterface": [
"allow_ip_spoofing",
"id",
"is_primary",
"name",
"primary_ipv4_address"
],
"PublicGateway": [
"id",
"name"
],
"VPC": [
"id",
"name"
],
"VpnGateway": [
"id",
"name"
],
"Region": [
"id",
"name",
"display_name"
],
"ResourceGroup": [
"id",
"name"
],
"RoutingTable": [
"id"
],
"Subnet": [
"available_ipv4_address_count",
"created_at",
"id",
"ip_version",
"ipv4_cidr_block",
"name",
"status",
"total_ipv4_address_count"
],
"Acl": [
"id",
"is_default",
"name"
]
}
},
"v1/ibm/resource_groups": {
"Nodes": [
"ResourceGroup"
],
"Relationships": [],
"Location": {
"ResourceGroup": "items[*]"
},
"Attribute": {
"ResourceGroup": [
"id",
"name"
]
}
},
"v1/ibm/instance_profiles": {
"Nodes": [
"VSITemplate",
"IBMCloud",
"Region"
],
"Relationships": [
{
"source": "VirtualServerInstanceTemplate",
"target": "IBMCloud",
"relationship": "PART_OF"
},
{
"source": "VirtualServerInstanceTemplate",
"target": "Region",
"relationship": "LOCATED_IN"
}
],
"Location": {
"VSITemplate": "items[*]",
"Region": "items[*].region",
"IBMCloud": "items[*].ibm_cloud"
},
"Attribute": {
"VSITemplate": [
"id",
"name",
"bandwidth",
"disks",
"family",
"gpu_count",
"gpu_manufacturer",
"gpu_memory",
"gpu_model",
"href",
"memory",
"os_architecture",
"port_speed",
"total_volume_bandwidth",
"vcpu_architecture",
"vcpu_count"
],
"IBMCloud": [
"id",
"name"
],
"Region": [
"id",
"name",
"display_name"
]
}
},
"v1/ibm/instance_template_constructs": {
"Nodes": [
"VirtualServerInstanceBackup",
"IBMCloud",
"Region",
"BackupPolicy"
],
"Relationships": [
{
"source": "VirtualServerInstanceBackup",
"target": "IBMCloud",
"relationship": "PART_OF"
},
{
"source": "Region",
"target": "VirtualServerInstanceBackup",
"relationship": "CONTAINS"
},
{
"source": "VirtualServerInstanceBackup",
"target": "Region",
"relationship": "EXISTS_IN"
},
{
"source": "VirtualServerInstanceBackup",
"target": "BackupPolicy",
"relationship": "HAS_POLICY"
},
{
"source": "VirtualServerInstanceBackup",
"target": "VirtualServerInstance",
"relationship": "BACKUP_OF"
},
{
"source": "VirtualServerInstance",
"target": "VirtualServerInstanceBackup",
"relationship": "HAS_BACKUP"
}
],
"Location": {
"VirtualServerInstanceBackup": "items[*]",
"Region": "items[*].region",
"IBMCloud": "items[*].ibm_cloud",
"VirtualServerInstance": "items[*].resource_json.source_instance",
"BackupPolicy": "items[*].backup_policies[*]"
},
"Attribute": {
"VirtualServerInstanceBackup": [
"id",
"name",
"migration_json",
"resource_json"
],
"IBMCloud": [
"id",
"name"
],
"Region": [
"id",
"name",
"display_name"
],
"BackupPolicy": [
"id",
"name"
]
}
},
"v1/ibm/clouds/<cloud_id>/idle-resources": {
"Nodes": [
"IBMCloud",
"Snapshot",
"FloatingIP",
"Image",
"IdleResourceAnalysis"
],
"Relationships": [
{
"source": "IBMCloud",
"target": "FloatingIP",
"relationship": "HAS_FLOATING_IP"
},
{
"source": "IBMCloud",
"target": "Snapshot",
"relationship": "HAS_SNAPSHOT"
},
{
"source": "IBMCloud",
"target": "Image",
"relationship": "HAS_IMAGE"
},
{
"source": "Snapshot",
"target": "IdleResourceAnalysis",
"relationship": "IS_IDLE"
},
{
"source": "FloatingIP",
"target": "IdleResourceAnalysis",
"relationship": "IS_IDLE"
},
{
"source": "Image",
"target": "IdleResourceAnalysis",
"relationship": "IS_IDLE"
}
],
"Location": {
"IBMCloud": "items[*]",
"Snapshot": "items[*].resources.snapshots[*]",
"FloatingIP": "items[*].resources.floating_ips[*]",
"Image": "items[*].resources.custom_images[*]",
"IdleResourceAnalysis": {
"Snapshot": "items[*].resources.snapshots[*].idle_data",
"FloatingIP": "items[*].resources.floating_ips[*].idle_data",
"Image": "items[*].resources.custom_images[*].idle_data"
}
},
"Attribute": {
"IBMCloud": [
"id"
],
"Snapshot": [
"id",
"cost"
],
"FloatingIP": [
"id",
"cost"
],
"Image": [
"id",
"cost"
],
"IdleResourceAnalysis": [
"id",
"marked_at",
"estimated_savings",
"reason",
"resource_type",
"source_type",
"status"
]
}
},
"v1/ibm/snapshots": {
"Nodes": [
"Snapshot",
"OperatingSystem",
"Region",
"ResourceGroup",
"SourceImage",
"Volume",
"Profile",
"Zone"
],
"Relationships": [
{
"source": "Snapshot",
"target": "OperatingSystem",
"relationship": "HAS_OPERATING_SYSTEM"
},
{
"source": "Snapshot",
"target": "Region",
"relationship": "LOCATED_IN"
},
{
"source": "Snapshot",
"target": "ResourceGroup",
"relationship": "BELONGS_TO"
},
{
"source": "Snapshot",
"target": "SourceImage",
"relationship": "DERIVED_FROM"
},
{
"source": "Snapshot",
"target": "Volume",
"relationship": "HAS_SOURCE_VOLUME"
},
{
"source": "Volume",
"target": "Profile",
"relationship": "USES_PROFILE"
},
{
"source": "Volume",
"target": "Zone",
"relationship": "LOCATED_IN"
}
],
"Location": {
"Snapshot": "items[*]",
"OperatingSystem": "items[*].operating_system",
"Region": "items[*].region",
"ResourceGroup": "items[*].resource_group",
"SourceImage": "items[*].source_image",
"Volume": "items[*].source_volume",
"Profile": "items[*].source_volume.profile",
"Zone": "items[*].source_volume.zone"
},
"Attribute": {
"Snapshot": [
"bootable",
"created_at",
"crn",
"encryption",
"encryption_key_crn",
"href",
"id",
"minimum_capacity",
"name",
"resource_type",
"size",
"status"
],
"OperatingSystem": [
"architecture",
"id",
"name",
"vendor"
],
"Region": [
"display_name",
"id",
"name"
],
"ResourceGroup": [
"id",
"name"
],
"SourceImage": [
"id",
"name"
],
"Volume": [
"capacity",
"id",
"iops",
"name"
],
"Profile": [
"id",
"name"
],
"Zone": [
"display_name",
"id",
"name"
]
}
},
"v1/ibm/floating_ips": {
"Nodes": [
"FloatingIP",
"IBMCloud",
"Region",
"ResourceGroup",
"Target",
"Instance",
"Zone"
],
"Relationships": [
{
"source": "FloatingIP",
"target": "IBMCloud",
"relationship": "has_ibm_cloud"
},
{
"source": "FloatingIP",
"target": "Region",
"relationship": "located_in_region"
},
{
"source": "FloatingIP",
"target": "ResourceGroup",
"relationship": "belongs_to_resource_group"
},
{
"source": "FloatingIP",
"target": "Target",
"relationship": "has_target"
},
{
"source": "Target",
"target": "Instance",
"relationship": "has_instance"
},
{
"source": "FloatingIP",
"target": "Zone",
"relationship": "located_in_zone"
}
],
"Location": {
"FloatingIP": "items[*]",
"IBMCloud": "items[*].ibm_cloud",
"Region": "items[*].region",
"ResourceGroup": "items[*].resource_group",
"Target": "items[*].target",
"Instance": "items[*].target.instance",
"Zone": "items[*].zone"
},
"Attribute": {
"FloatingIP": [
"address",
"created_at",
"crn",
"href",
"id",
"name",
"resource_id",
"status"
],
"IBMCloud": [
"id",
"name"
],
"Region": [
"display_name",
"id",
"name"
],
"ResourceGroup": [
"id",
"name"
],
"Target": [
"id",
"name",
"resource_type"
],
"Instance": [
"id",
"name"
],
"Zone": [
"display_name",
"id",
"name"
]
}
},
"v1/ibm/images": {
"Nodes": [
"Image",
"IBMCloud",
"OperatingSystem",
"Region",
"ResourceGroup",
"SourceVolume",
"Profile",
"Zone"
],
"Relationships": [
{
"source": "Image",
"target": "IBMCloud",
"relationship": "has_ibm_cloud"
},
{
"source": "Image",
"target": "OperatingSystem",
"relationship": "has_operating_system"
},
{
"source": "Image",
"target": "Region",
"relationship": "located_in_region"
},
{
"source": "Image",
"target": "ResourceGroup",
"relationship": "belongs_to_resource_group"
},
{
"source": "Image",
"target": "SourceVolume",
"relationship": "has_source_volume"
},
{
"source": "SourceVolume",
"target": "Profile",
"relationship": "has_profile"
},
{
"source": "SourceVolume",
"target": "Zone",
"relationship": "located_in_zone"
}
],
"Location": {
"Image": "items[*]",
"IBMCloud": "items[*].ibm_cloud",
"OperatingSystem": "items[*].operating_system",
"Region": "items[*].region",
"ResourceGroup": "items[*].resource_group",
"SourceVolume": "items[*].source_volume",
"Profile": "items[*].source_volume.profile",
"Zone": "items[*].source_volume.zone"
},
"Attribute": {
"Image": [
"created_at",
"crn",
"encryption",
"encryption_key_crn",
"file_checksums_sha256",
"file_size",
"href",
"ibm_status_reasons",
"id",
"minimum_provisioned_size",
"name",
"resource_id",
"source",
"status",
"visibility"
],
"IBMCloud": [
"id",
"name"
],
"OperatingSystem": [
"architecture",
"dedicated_host_only",
"display_name",
"family",
"href",
"id",
"name",
"vendor",
"version"
],
"Region": [
"display_name",
"id",
"name"
],
"ResourceGroup": [
"id",
"name"
],
"SourceVolume": [
"capacity",
"id",
"iops",
"name"
],
"Profile": [
"id",
"name"
],
"Zone": [
"display_name",
"id",
"name"
]
}
}
}
|
CloudWhisperCustomBot | app/api_discovery/utils.py | import base64
import hashlib
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from loguru import logger
from sqlalchemy import update
from app.core.config import settings
from app.web.common import db_deps
def encrypt_api_key(api_key):
"""Encrypt api_key"""
if not api_key:
return ""
try:
salt = get_random_bytes(settings.encryption.SALT_LENGTH)
iv = get_random_bytes(settings.encryption.BLOCK_SIZE)
derived_secret = hashlib.pbkdf2_hmac(
hash_name='sha256', password=settings.encryption.SECRET.encode(), salt=salt,
iterations=settings.encryption.DERIVATION_ROUNDS
)
length = 16 - (len(api_key) % 16)
api_key += chr(length) * length
cipher = AES.new(derived_secret, AES.MODE_CBC, iv)
encrypted_bytes = cipher.encrypt(str.encode(api_key))
encrypted_data = base64.b64encode(encrypted_bytes + iv + salt)
return encrypted_data.decode('utf-8')
except Exception as e:
logger.info(f"Exception raised while encrypting: {api_key} Exception message: {e}")
return api_key
def decrypt_api_key(encrypted_api_key):
"""Decrypt api_key"""
if not encrypted_api_key:
return ""
try:
# Decode the base64 encoded string
encrypted_bytes = base64.b64decode(encrypted_api_key)
# Extract the iv and salt from the encrypted bytes
start_iv = len(encrypted_bytes) - settings.encryption.BLOCK_SIZE - settings.encryption.SALT_LENGTH
start_salt = len(encrypted_bytes) - settings.encryption.SALT_LENGTH
data, iv, salt = (
encrypted_bytes[:start_iv],
encrypted_bytes[start_iv:start_salt],
encrypted_bytes[start_salt:],
)
# Derive the secret key using the same parameters as encryption
derived_secret = hashlib.pbkdf2_hmac(
hash_name="sha256", password=settings.encryption.SECRET.encode(), salt=salt,
iterations=settings.encryption.DERIVATION_ROUNDS
)
# Initialize the AES cipher for decryption
cipher = AES.new(derived_secret, AES.MODE_CBC, iv)
# Decrypt the data
decrypted_bytes = cipher.decrypt(data)
# Remove the padding
length = decrypted_bytes[-1]
decrypted_api_key = decrypted_bytes[:-length].decode("utf-8")
return decrypted_api_key
except Exception as e:
logger.info(f"Exception raised while decrypting: {encrypted_api_key} Exception message: {e}")
return encrypted_api_key
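# Illustrative usage sketch (not executed anywhere in the codebase): the two helpers above are
# intended to be inverses, assuming settings.encryption supplies SECRET, SALT_LENGTH, BLOCK_SIZE
# and DERIVATION_ROUNDS:
#   token = encrypt_api_key("my-vpcplus-api-key")   # hypothetical key value
#   assert decrypt_api_key(token) == "my-vpcplus-api-key"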
async def update_profile_with_vpcplus_api_key(profile_id, api_key, api_key_name, api_key_expiry=None):
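    """Encrypt the given VPC+ API key and store it, with its name and expiry, on the user's Profile row."""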
from app.models import Profile
encrypted_api_key = encrypt_api_key(api_key)
async with db_deps.get_db_session_async_context() as db_session:
profile_query = await db_session.execute(
update(Profile)
.where(Profile.user_id == profile_id)
.values(api_key={"API-KEY": encrypted_api_key, "name": api_key_name, "expires_at": api_key_expiry},
api_key_status=settings.api_key_status.STATUS_VALID)
.returning(Profile)
)
updated_profile = profile_query.scalars().first()
await db_session.commit()
return updated_profile
async def update_profile_api_key_status(profile_id, api_key_status):
from app.models import Profile
async with db_deps.get_db_session_async_context() as db_session:
profile_query = await db_session.execute(
update(Profile)
.where(Profile.user_id == profile_id)
.values(api_key_status=api_key_status)
.returning(Profile)
)
updated_profile = profile_query.scalars().first()
await db_session.commit()
return updated_profile
|
CloudWhisperCustomBot | app/api_discovery/discovery_task.py | import itertools
import json
import time
from datetime import datetime
import requests
from itertools import groupby
from loguru import logger
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from app.core.config import neo4j_driver as driver
from app.core.config import settings
# Load knowledge graph data
with open("app/api_discovery/node_rel.json") as file:
kg_data = json.load(file)
with open("app/api_discovery/classic-node-rel.json") as file:
classic_kg_data = json.load(file)
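# Both mapping files describe, per API endpoint: the node labels to create ("Nodes"), where each node
# lives in the response ("Location", a JSONPath-like string such as "items[*].worker_pools[*]", or a
# dict of such paths keyed by parent node type), which payload fields to copy onto the node
# ("Attribute"), and the edges to merge between nodes ("Relationships").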
# Setup requests session with retries and timeout
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
adapter = HTTPAdapter(max_retries=retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
RED = "\033[91m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
BLUE = "\033[94m"
MAGENTA = "\033[95m"
CYAN = "\033[96m"
WHITE = "\033[97m"
RESET = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def normalize_resource_type(resource_type):
return resource_type.lower().replace(' ', '_') + 's'
def preprocess_all_idle_resource(response, cloud_id):
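    """Reshape the flat idle-resources response into {"items": [{"id": cloud_id, "resources": {<type>: [...]}}]}
    so it matches the Location paths defined for the idle-resources endpoint in node_rel.json."""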
resource_list = []
resource_type = None
# Iterate over each item in the 'items' list from the response.
logger.info(f"preprocessing: \n\n\n----> {response}")
for item in response.get("items", []):
# Extract the resource type from the current item.
resource_type = item.get("resource_type")
logger.info(item.get("db_resource_id"))
# Define structured data for each resource.
resource_data = {
"id": item.get("db_resource_id", "none"),
"crn": item.get("crn"),
"region": item.get("region"),
"cost": item.get("cost"),
"resource_type": item.get("resource_type"),
"idle_data": {
"id": item.get("id"),
"marked_at": item.get("marked_at"),
"estimated_savings": item.get("estimated_savings"),
"reason": item.get("reason"),
"resource_type": item.get("resource_type"),
"source_type": item.get("source_type"),
"status": item.get("status")
}
}
# Add the structured data to the corresponding resource type list.
if resource_type:
resource_list.append(resource_data)
response['items'] = resource_list
# Transform the data
transformed_data = {"items": [{"id": cloud_id, "resources": {k: list(v) for k, v in groupby(
sorted(response["items"], key=lambda x: normalize_resource_type(x["resource_type"])),
key=lambda x: normalize_resource_type(x["resource_type"]))}}]}
logger.info(f"TRANSFORMED data--->\n\n\n{transformed_data}\n\n\n")
return transformed_data
def execute_paginated_api(url, headers=None, params=None, timeout=20, cloud_payload=None, retry=0):
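    """Fetch every page of a VPC+ endpoint and merge the paginated 'items' lists into one response dict.

    Retries transport errors, post-processes idle-resources responses via preprocess_all_idle_resource,
    and polls workflow endpoints until their result payload is available."""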
if "<cloud_id>" in url:
url = url.replace("<cloud_id>", params.get('cloud_id'))
logger.debug(f"URL---->{url}")
logger.debug(f"PARAMS---->{params}")
resource_response = dict()
page = 1
total_pages = None # Initialize total_pages
while True:
# Update the params with the current page
if params:
params.update({"page": page, "start": page})
else:
params = {"page": page, "start": page}
try:
# Send the request
response = session.get(url, headers=headers, params=params, timeout=timeout)
except requests.exceptions.RequestException as e:
logger.error(f"Request error: {e}")
if retry <= 2:
                return execute_paginated_api(url=url, headers=headers, params=params, timeout=20,
                                             cloud_payload=cloud_payload, retry=retry + 1)
return
# If response is empty, return the status code
logger.info(f"Status code of {url}---->{response.status_code}")
if response.status_code == 204:
return response.status_code
# If non-successful response, log and return
if response.status_code not in [200, 202]:
logger.error(f"Failed to fetch data: {response.status_code}")
return response.status_code
# Parse response data
data = response.json()
if isinstance(data, list):
data = {"items": data}
# Merge paginated items into resource_response
if 'items' in resource_response:
resources = resource_response['items']
resource_response['items'] = list(itertools.chain(resources, data.get('items', [])))
else:
resource_response = data
# Set total_pages on first response
if total_pages is None:
total_pages = data.get('total_pages', 1) # Default to 1 if total_pages isn't in response
logger.info(f"Total pages: {total_pages}")
# Break the loop if we've reached the last page
if page >= total_pages:
logger.info("All pages fetched. Breaking the loop.")
break
# Increment page number for the next request
page += 1
if "idle-resources" in url:
return preprocess_all_idle_resource(resource_response, cloud_id=params.get("cloud_id"))
if "v1/ibm/workflows" in url:
if resource_response['result']:
return resource_response
logger.info(f"delaying {url}")
time.sleep(3)
resource_response = execute_paginated_api(url, headers=headers, params=params, timeout=30,
cloud_payload=cloud_payload)
if resource_response.get('result', {}).get("resource_json", {}):
data = resource_response.get('result', {}).get("resource_json", {})
logger.info(type(data))
logger.info(params)
cloud_payload['items'][-1]['resources'] = json.loads(data)
resource_response = cloud_payload
logger.info(resource_response)
# Return the aggregated response
return resource_response
def recursive_discovery(payload, headers, dep_api=None, arg=None, user_id=None):
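    """Walk the dependent-API schema from api_schema.json: call each child endpoint with parameters taken
    from the parent payload and ingest every response into the knowledge graph using node_rel.json."""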
if isinstance(dep_api, list):
for api in dep_api:
if isinstance(api, dict):
for key, schema in api.items():
payload = execute_paginated_api(url=f'{settings.web.AUTH_LINK}/{key}', headers=headers, params=arg)
if not isinstance(payload, dict):
return
ingest_kg(payload, kg_data[key], user_id=user_id, cloud_id=arg.get('cloud_id'))
recursive_discovery(payload=payload, headers=headers, dep_api=schema, arg=arg, user_id=user_id)
else:
kg_api = api
new_param_key = ""
if '?' in api:
query_params_list = api.split('?')[1:]
api = api.split('?')[0]
for param in query_params_list:
new_param_key = param.split('=')[0]
arg[param.split('=')[0]] = param.split('=')[1]
payload = execute_paginated_api(url=f'{settings.web.AUTH_LINK}/{api}', headers=headers, params=arg)
arg.pop(new_param_key, None)
if not isinstance(payload, dict):
continue
ingest_kg(payload, kg_data[kg_api], user_id=user_id, cloud_id=arg.get('cloud_id'))
return
for key, dep_apis in dep_api.items():
if isinstance(payload, dict):
if payload.get("items"):
payload = payload['items']
for resource in payload:
for req_key, apis in dep_apis.items():
if '/' in req_key:
return
else:
if arg:
arg[req_key] = resource[key]
else:
arg = {req_key: resource[key]}
recursive_discovery(payload=payload, headers=headers, dep_api=apis, arg=arg, user_id=user_id)
return
def classic_recursive_discovery(payload, headers, dep_api=None, arg=None, user_id=None, path_param_arg=None,
                                initial_payload=None):
    # Use fresh dicts per call instead of shared mutable default arguments
    arg = arg if arg is not None else {}
    path_param_arg = path_param_arg if path_param_arg is not None else {}
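    # Classic (SoftLayer) counterpart of recursive_discovery: resolves path and query parameters from the
    # parent payload, calls each dependent endpoint, and ingests responses using classic-node-rel.json.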
if isinstance(dep_api, list):
for api in dep_api:
if isinstance(api, dict):
for key, schema in api.items():
if '{' in key:
logger.info(key)
payload = execute_paginated_api(url=f'{settings.web.AUTH_LINK}/{key}', headers=headers, params=arg,
cloud_payload=initial_payload)
logger.info(GREEN + f"API-> {key} \n Response->{payload}")
if not isinstance(payload, dict):
return
# ingest_kg(payload, kg_data[key], user_id=user_id)
classic_recursive_discovery(payload=payload, headers=headers, dep_api=schema, arg=arg,
user_id=user_id, initial_payload=initial_payload)
else:
args = arg
kg_api = api
if '?' in api:
query_params_list = api.split('?')[1:]
api = api.split('?')[0]
for param in query_params_list:
args[param.split('=')[0]] = param.split('=')[1]
if path_param_arg:
for key, value in path_param_arg.items():
api = api.replace(f'{{{key}}}', str(value))
logger.info(api)
payload = execute_paginated_api(url=f'{settings.web.AUTH_LINK}/{api}', headers=headers, params=arg,
cloud_payload=initial_payload)
if "v1/softlayer/discover" not in kg_api:
logger.info(f"ARGS --------------> {arg}")
ingest_kg(payload, classic_kg_data[kg_api], user_id=user_id, cloud_id=arg.get("softlayer_cloud_id"))
if not isinstance(payload, dict):
continue
print(f"injecting kg with {kg_api}")
return
if isinstance(payload, dict) and not payload.get("items"):
payload = {'items': [payload]}
elif isinstance(payload, list):
payload = {'items': payload}
for req_key, apis in dep_api.items():
for resource in payload.get('items'):
logger.info(req_key)
logger.info(apis)
if '/p' in req_key:
path_params = req_key[2:].split('+') if '+' in req_key else [req_key[2:]]
logger.info(path_params)
def param_finder(data, pattern):
if isinstance(data, list):
data = data[-1]
if pattern:
if '.' not in pattern.split('[*]')[0] and '[*]' not in pattern.split('[*]')[0] and '[*]' in \
pattern:
data = param_finder(data[pattern.split('[*]')[0]][-1], pattern.split('[*]')[-1])
return data
elif '.' not in pattern.split('.')[0] and '[*]' not in pattern.split('.')[0] and '.' in pattern:
logger.info(data[pattern.split('.')[-1]])
data = param_finder(data[pattern.split('.')[-1]], pattern.split('.')[-1])
return data
elif '[*]' not in pattern and '.' not in pattern:
logger.info('3')
return data
for param in path_params:
if "[*]" not in param or "." not in param:
logger.info(param)
logger.info(resource)
logger.info(type(resource))
path_param_arg[param.split('=')[-1]] = resource.get(param.split('=')[0])
else:
logger.info(f"{resource} {param.split('=')[0]}")
path_param_arg[param.split('=')[-1]] = param_finder(resource, param.split('=')[0])
logger.info(path_param_arg)
classic_recursive_discovery(payload=resource, headers=headers, dep_api=apis, arg=arg, user_id=user_id,
path_param_arg=path_param_arg, initial_payload=initial_payload)
elif '/' in req_key:
logger.info(req_key)
actual_key = req_key
for key, value in path_param_arg.items():
req_key = req_key.replace(f'{{{key}}}', str(value))
payload = execute_paginated_api(url=f'{settings.web.AUTH_LINK}/{req_key}', headers=headers, params=arg,
cloud_payload=initial_payload)
if "v1/softlayer/discover" not in actual_key:
logger.info(f"ARGS-->{arg}")
ingest_kg(payload, classic_kg_data[actual_key], user_id=user_id, cloud_id=arg.get("softlayer_cloud_id"))
classic_recursive_discovery(payload=payload, headers=headers, dep_api=apis, arg=arg, user_id=user_id,
path_param_arg=path_param_arg, initial_payload=initial_payload)
return
elif '/' not in req_key:
logger.info(req_key)
logger.info(resource)
logger.info(req_key.split('='))
arg[req_key.split('=')[-1]] = resource[req_key.split('=')[0]]
logger.info(f"ARG~~~~~~~>{arg}")
classic_recursive_discovery(payload=resource, headers=headers, dep_api=apis, arg=arg, user_id=user_id,
initial_payload=initial_payload)
break
return
def preprocess_payload(payload):
if isinstance(payload, list):
return {"items": payload}
return payload
def check_relation_bw_locations(previous_location, current_location, previous_loc_id, current_loc_id,
location_to_node_dict, relationships, user_id):
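    """Return a Cypher MERGE statement linking two discovered nodes when the schema defines a relationship
    between their JSON locations (checked in both directions); return None when no relationship applies."""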
for rel in relationships:
if rel['source'] == location_to_node_dict.get(previous_location) and \
rel['target'] == location_to_node_dict.get(current_location):
return f"MATCH (n1:{location_to_node_dict[previous_location]}_{user_id} {{id: '{previous_loc_id}'}}) " \
f"MATCH (n2:{location_to_node_dict[current_location]}_{user_id} {{id: '{current_loc_id}'}}) " \
f"WITH n1, n2 MERGE (n1)-[:{rel['relationship']}]->(n2);\n"
elif rel['source'] == location_to_node_dict.get(current_location) and \
rel['target'] == location_to_node_dict.get(previous_location):
return f"MATCH (n1:{location_to_node_dict[current_location]}_{user_id} {{id: '{current_loc_id}'}}) " \
f"MATCH (n2:{location_to_node_dict[previous_location]}_{user_id} {{id: '{previous_loc_id}'}}) " \
f"WITH n1, n2 MERGE (n1)-[:{rel['relationship']}]->(n2);\n"
return
def get_location_combos(current_loc, previous_loc, current_nid, previous_nid, data, primary_key, location_to_node_dict,
query_list, relationships, user_id):
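    """Recursively pair parent and child JSON locations of a resource payload, appending the relationship
    MERGE statements produced by check_relation_bw_locations to query_list."""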
for key, value in data.items():
if isinstance(value, dict):
query = check_relation_bw_locations(previous_loc, current_loc + '.' + key, previous_nid,
value.get(primary_key), location_to_node_dict, relationships, user_id)
if query:
query_list += query
query_list = get_location_combos((current_loc + '.' + key), previous_loc, value.get(primary_key),
previous_nid, value, primary_key, location_to_node_dict, query_list,
relationships, user_id)
query_list = get_location_combos((current_loc + '.' + key), (current_loc + '.' + key),
value.get(primary_key, current_nid),
value.get(primary_key, previous_nid), value, primary_key,
location_to_node_dict, query_list, relationships, user_id)
if isinstance(value, list):
new_current_loc = current_loc + '.' + key + "[*]"
for sub_resource in value:
if not isinstance(sub_resource, str):
if isinstance(sub_resource, list):
continue
query_list = get_location_combos(new_current_loc, previous_loc,
sub_resource.get(primary_key, current_nid), previous_nid,
sub_resource, primary_key, location_to_node_dict, query_list,
relationships, user_id)
query = check_relation_bw_locations(previous_loc, new_current_loc, previous_nid,
current_nid if not isinstance(sub_resource, dict) else
sub_resource.get(primary_key, previous_nid),
location_to_node_dict, relationships, user_id)
if query:
query_list += query
query_list = get_location_combos(new_current_loc, new_current_loc,
sub_resource.get(primary_key, current_nid),
sub_resource.get(primary_key, previous_nid),
sub_resource, primary_key, location_to_node_dict,
query_list, relationships, user_id)
return query_list
def start_discovery(api_key, user_id):
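    """Discover a user's IBM VPC resources: call every root endpoint in api_schema.json, follow its
    dependent endpoints, and ingest the responses into the Neo4j knowledge graph."""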
headers = {
"accept": "application/json",
"Content-Type": "application/json",
"API-KEY": api_key
}
with open("app/api_discovery/api_schema.json") as f:
api_relation_schema = json.load(f)
for root_endpoint, dep_api in api_relation_schema.items():
logger.info(f"Processing endpoint: {root_endpoint}")
try:
response = requests.get(f'{settings.web.AUTH_LINK}/{root_endpoint}', headers=headers, timeout=30)
response.raise_for_status() # Check for HTTP errors
try:
payload = response.json() if response.content else None
except json.JSONDecodeError:
logger.error(f"Failed to decode JSON from {root_endpoint}: {response.text}")
continue
if not payload:
logger.error(f"Empty or invalid JSON received from {root_endpoint}")
continue
            payload = preprocess_payload(payload)
recursive_discovery(payload=payload, headers=headers, dep_api=dep_api, user_id=user_id)
ingest_kg(payload, kg_data[root_endpoint], user_id=user_id)
except requests.exceptions.RequestException as e:
logger.error(f"An error occurred with {root_endpoint}: {e}")
return
def classic_start_discovery(api_key, user_id):
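    """Discover a user's IBM Classic (SoftLayer) resources using classic_api_schema.json and ingest them
    into the knowledge graph."""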
headers = {
"accept": "application/json",
"Content-Type": "application/json",
"API-KEY": api_key
}
logger.info(f"API-KEY -->{api_key}")
with open("app/api_discovery/classic_api_schema.json") as f:
api_relation_schema = json.load(f)
for root_endpoint, dep_api in api_relation_schema.items():
logger.info(f"Processing endpoint: {root_endpoint}")
try:
response = requests.get(f'{settings.web.AUTH_LINK}/{root_endpoint}', headers=headers, timeout=30)
response.raise_for_status() # Check for HTTP errors
logger.info(f"Response->{response}\nStatus->{response.status_code}")
try:
payload = response.json() if response.content else None
except json.JSONDecodeError:
logger.error(f"Failed to decode JSON from {root_endpoint}: {response.text}")
continue
if not payload:
logger.error(f"Empty or invalid JSON received from {root_endpoint}")
continue
payload = preprocess_payload(payload)
logger.info(payload)
classic_recursive_discovery(payload=payload, headers=headers, dep_api=dep_api, user_id=user_id,
path_param_arg={}, initial_payload=payload)
# ingest_kg(payload, kg_data[root_endpoint], user_id=user_id)
print(f"injesting kg with {root_endpoint}")
except requests.exceptions.RequestException as e:
logger.error(f"An error occurred with {root_endpoint}: {e}")
return
def current_timestamp():
datetime_now = datetime.now()
return int(datetime_now.timestamp() * 1000)
def ingest_kg(payload, graph_info, user_id, cloud_id=None):
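    """Build and execute Cypher MERGE statements for every node and relationship that graph_info
    (Nodes/Location/Attribute/Relationships) locates in the API payload, labelling nodes per user."""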
nodes = graph_info["Nodes"]
locations = graph_info["Location"]
attributes = graph_info["Attribute"]
relationships = graph_info["Relationships"]
def extract_node_query(node, data, path_lists, cypher_query, data_dict):
# Iterate over the list of node and create node query
new_data = data
for path_list in path_lists:
for path in path_list:
if not new_data or isinstance(new_data, list):
return cypher_query, data_dict
new_data = new_data.get(path)
if isinstance(new_data, list):
for x in new_data:
cypher_query, data_dict = extract_node_query(node, x, path_lists[1:] if len(path_lists) > 1 else [],
cypher_query, data_dict)
if isinstance(new_data, list):
return cypher_query, data_dict
if new_data and new_data.get("id"):
cypher_query += f'MERGE (n:{node}_{user_id} {{id: "{new_data.get("id")}", user_id: "{user_id}"}}) SET\n'
temp = {}
for key in attributes[node]:
if not new_data.get(key):
continue
temp[key] = new_data.get(key)
query_data = str(new_data[key])
cypher_query += f' n.{key}= "{query_data}", '
cypher_query += f' n.discovered_at= {current_timestamp()}, '
if cloud_id:
cypher_query += f' n.cloud_id= "{cloud_id}", '
cypher_query = cypher_query[:-2]
cypher_query += ";\n"
data_dict[node].append(temp)
return cypher_query, data_dict
cypher_query = ''
data_dict = {}
for node in nodes:
data_dict[node] = []
if isinstance(locations[node], dict):
for key, obj in locations[node].items():
path_list = [list(filter(None, a.split('.'))) for a in
list(filter(None, (obj.replace("'", "")).split("[*]")))]
cypher_query, data_dict = extract_node_query(node, payload, path_list, cypher_query, data_dict)
else:
path_list = [list(filter(None, a.split('.'))) for a in
list(filter(None, (locations[node].replace("'", "")).split("[*]")))]
cypher_query, data_dict = extract_node_query(node, payload, path_list, cypher_query, data_dict)
# DICTIONARY having location to node key-value pairs
location_to_node_dict = dict()
for node, loc in locations.items():
if isinstance(loc, dict):
for i, obj in loc.items():
location_to_node_dict[obj.replace("'", "")] = node.replace("'", "")
continue
location_to_node_dict[loc] = node
root_node = nodes[0]
root_key = (locations[root_node].replace("'", "")).strip('[*]') if isinstance(locations[root_node], str) else (
(locations[root_node][root_node].replace("'", "")).strip('[*]'))
root_location = locations[root_node].replace("'", "") if isinstance(locations[root_node], str) else (
(locations[root_node][root_node].replace("'", "")))
current_loc = previous_loc = root_location
primary_key = 'id'
if '[*]' in root_key:
root_key = root_key.split('[*]')[0]
query = ''
for resource in payload[root_key]:
query = get_location_combos(current_loc, previous_loc, resource[primary_key], resource[primary_key], resource,
primary_key, location_to_node_dict, query, relationships, user_id)
cypher_query += query
cypher_query_list = cypher_query.split(';')
final_list = []
with driver.session() as session:
for query in cypher_query_list:
query = query.strip()
if query and query not in final_list:
# logger.info(f"EXECUTING CYPER {root_node} QUERY ==>\n{query};")
final_list.append(query)
session.write_transaction(lambda tx, q=(query + ';'): tx.run(q))
logger.info("<==============================================================================>")
logger.info(f"<=============== ALL NODES OF {root_node} SUCCESSFULLY CREATED ===============>")
logger.info("<==============================================================================>")
|
CloudWhisperCustomBot | app/api_discovery/api_schema.json | {
"v1/ibm/clouds": {"id": {"cloud_id": [
"v1/ibm/kubernetes_clusters",
"v1/ibm/cloud_object_storages/keys",
"v1/ibm/vpcs",
"v1/ibm/geography/regions",
"v1/ibm/instance_template_constructs",
"v1/ibm/draas_blueprints?resource_type=IBMVpcNetwork",
"v1/ibm/draas_blueprints?resource_type=IBMKubernetesCluster",
"v1/ibm/draas/cos-buckets/backups",
"v1/ibm/cloud_object_storages/buckets",
"v1/ibm/cloud_object_storages",
"v1/ibm/instances",
"v1/ibm/backup_policies",
"v1/ibm/subnets",
"v1/ibm/instance_profiles",
"v1/ibm/snapshots",
"v1/ibm/floating_ips",
"v1/ibm/images",
"v1/ibm/clouds/<cloud_id>/idle-resources"
]
}
}
} |
CloudWhisperCustomBot | app/api_discovery/discovery.py | import asyncio
import httpx
import mailchimp_transactional as MailchimpTransactional
from celery_singleton import Singleton
from loguru import logger
from mailchimp_transactional.api_client import ApiClientError
from sqlalchemy import select, update, func
from app.api_discovery.discovery_task import start_discovery, classic_start_discovery
from app.models import Profile
from app.redis_scheduler import celery_app
from app.redis_scheduler import celery_app as celery
from app.web.common.db_deps import AsyncSessionLocal, get_db_session_async_context
from .delete_redundant_node_task import delete_redundant_nodes
from .discovery_task import current_timestamp
from .utils import decrypt_api_key, update_profile_api_key_status
from ..core.config import settings
def run_async(coro):
loop = asyncio.get_event_loop()
return loop.run_until_complete(coro)
@celery_app.task(name="discover_api_data", base=Singleton, queue='redis_queue')
def discover_api_data():
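    """Celery task: for every profile with a usable VPC+ API key, verify the key, run VPC and Classic
    discovery, prune stale graph nodes, and update the profile's last_updated_at timestamp."""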
logger.info("<==============================================================================================>")
logger.info("<==================================== INITIATING DISCOVERY ====================================>")
logger.info("<==============================================================================================>")
async def async_operation(query):
async with AsyncSessionLocal() as session:
result = await session.execute(query)
users = result.scalars().all()
users = [{'user_id': user.user_id, 'api_key': user.api_key, 'email': user.email, 'name': user.name,
'api_key_status': user.api_key_status or ''} for user in users]
return users
async def run_task():
query = select(Profile)
users = await async_operation(query)
if users:
for user in users:
if not user['api_key']:
continue
if user.get('api_key_status') == settings.api_key_status.STATUS_INVALID:
continue
logger.info(f"apikey: {user['api_key']}, user_id: {user['user_id']}")
decrypted_api_key = decrypt_api_key(user['api_key']['API-KEY'])
api_key_name = user['api_key']['name']
headers = {"API-KEY": decrypted_api_key}
async with httpx.AsyncClient() as http_client:
resp = await http_client.get(f"{settings.web.AUTH_LINK}/v1/users/verify", headers=headers)
if not resp or resp.status_code == 401:
async with get_db_session_async_context() as db_session:
                            await update_profile_api_key_status(profile_id=user['user_id'],
                                                                api_key_status=settings.api_key_status.STATUS_INVALID)
recipients = [{"email": user['email'], "type": "to"}]
send_email.delay(email_to=recipients, subject="VPC+ API Key Issue", user_name=user['name'],
api_key_name=api_key_name, vpcplus_url=settings.web.AUTH_LINK)
continue
current_time = current_timestamp()
logger.info(
"<==============================================================================================>")
logger.info(
f"<================== DISCOVERY FOR {user['user_id']} INITIATED at {current_time} ===============>")
logger.info(
"<==============================================================================================>")
start_discovery(api_key=decrypted_api_key, user_id=user['user_id'])
classic_start_discovery(api_key=decrypted_api_key, user_id=user['user_id'])
logger.info(
"<==============================================================================================>")
logger.info(
"<==================================== DELETING NODES ==========================================>")
logger.info(
"<==============================================================================================>")
delete_redundant_nodes(user_id=user['user_id'],
timestamp=current_time)
# Update the profile with the current timestamp after discovery is completed
async with get_db_session_async_context() as db_session:
await db_session.execute(
update(Profile).where(Profile.user_id == user['user_id']).values(last_updated_at=func.now())
)
await db_session.commit()
else:
logger.info("NO USER FOUND IN DATABASE")
asyncio.run(run_task())
@celery.task(name="send_email", base=Singleton, queue='redis_queue')
def send_email(email_to: list, subject: str = "", user_name: str = "", api_key_name: str = "",
vpcplus_url: str = "") -> None:
"""
This function initialize mail chimp client and send an email.
"""
text = (f"Hey {user_name},\n\n Your API Key with name: '{api_key_name}' has expired or has been deleted. Please"
f" visit {vpcplus_url}/settings -> API Key for further details. \n\n Thanks \n Wanclouds Inc.")
mailchimp = MailchimpTransactional.Client(settings.email.MANDRILL_API_KEY)
message = {
"from_email": settings.email.MAIL_USERNAME,
"subject": subject,
"text": text,
"to": email_to
}
try:
response = mailchimp.messages.send({"message": message})
logger.info('Send email called successfully: {}'.format(response))
except ApiClientError as error:
logger.error('An exception occurred: {}'.format(error.text))
|
CloudWhisperCustomBot | app/api_discovery/classic-node-rel.json | {
"v1/ibm/workflows/{root_id}/tasks/{task_id}": {
"Nodes": [
"ClassicCloud",
"ClassicDatacenter",
"ClassicLocationStatus",
"ClassicBareMetal",
"ClassicBareMetalStatus",
"ClassicNetworkVlan",
"ClassicVirtualGuest",
"ClassicSecurityGroup",
"ClassicRule",
"ClassicPrimaryRouter",
"ClassicPlacementGroupRule",
"ClassicPlacementGroup",
"ClassicLoadBalancer",
"ClassicHealthMonitor",
"ClassicListener",
"ClassicPool",
"ClassicSSHKey",
"ClassicSSL_Certificates",
"ClassicBlockVolume",
"ClassicServiceResource",
"ClassicFileVolume",
"ClassicFirewall",
"ClassicFirewallInterface",
"ClassicFirewallGuestNetworkComponent",
"ClassicGuestNetworkComponent",
"ClassicGuest",
"ClassicPrivateImage",
"ClassicPublicImage",
"ClassicVirtualServer",
"ClassicBlockDevices",
"ClassicDiskImage",
"ClassicOperatingSystem",
"ClassicSoftwareLicense",
"ClassicVirtualServerType",
"ClassicSoftwareComponent"
],
"Relationships": [
{
"source": "ClassicCloud",
"target": "ClassicSubnet",
"relationship": "HAS_SUBNET"
},
{
"source": "ClassicCloud",
"target": "ClassicBareMetal",
"relationship": "HAS_BAREMETAL"
},
{
"source": "ClassicCloud",
"target": "ClassicVirtualServer",
"relationship": "HAS_VIRTUAL_SERVER"
},
{
"source": "ClassicCloud",
"target": "ClassicNetworkVlan",
"relationship": "HAS_VLAN"
},
{
"source": "ClassicCloud",
"target": "ClassicPlacementGroup",
"relationship": "HAS_PLACEMENT_GROUP"
},
{
"source": "ClassicCloud",
"target": "ClassicLoadBalancer",
"relationship": "HAS_LOAD_BALANCER"
},
{
"source": "ClassicCloud",
"target": "ClassicSSHKey",
"relationship": "HAS_SSH_KEY"
},
{
"source": "ClassicCloud",
"target": "ClassicSSL_Certificates",
"relationship": "HAS_SSL_CERTIFICATE"
},
{
"source": "ClassicCloud",
"target": "ClassicBlockVolume",
"relationship": "HAS_BLOCK_VOLUME"
},
{
"source": "ClassicCloud",
"target": "ClassicDatacenter",
"relationship": "HAS_DATACENTER"
},
{
"source": "ClassicSubnet",
"target": "ClassicDatacenter",
"relationship": "LOCATED_IN"
},
{
"source": "ClassicDatacenter",
"target": "ClassicLocationStatus",
"relationship": "HAS_STATUS"
},
{
"source": "ClassicSubnet",
"target": "ClassicBareMetal",
"relationship": "ATTACHED_WITH"
},
{
"source": "ClassicBareMetal",
"target": "ClassicBareMetalStatus",
"relationship": "HAS_STATUS"
},
{
"source": "ClassicSubnet",
"target": "ClassicNetworkVlan",
"relationship": "BELONGS_TO"
},
{
"source": "ClassicSubnet",
"target": "ClassicVirtualGuest",
"relationship": "CONTAINS"
},
{
"source": "ClassicSecurityGroup",
"target": "ClassicRule",
"relationship": "HAS_RULE"
},
{
"source": "ClassicNetworkVlan",
"target": "ClassicSubnet",
"relationship": "HAS_PRIMARY_SUBNET"
},
{
"source": "ClassicNetworkVlan",
"target": "ClassicDatacenter",
"relationship": "LOCATED_IN"
},
{
"source": "ClassicDatacenter",
"target": "ClassicLocationStatus",
"relationship": "HAS_STATUS"
},
{
"source": "ClassicNetworkVlan",
"target": "ClassicPrimaryRouter",
"relationship": "HAS_PRIMARY_ROUTER"
},
{
"source": "ClassicPlacementGroup",
"target": "ClassicPlacementGroupRule",
"relationship": "HAS_RULE"
},
{
"source": "ClassicLoadBalancer",
"target": "ClassicDatacenter",
"relationship": "LOCATED_IN"
},
{
"source": "ClassicDatacenter",
"target": "ClassicLocationStatus",
"relationship": "HAS_STATUS"
},
{
"source": "ClassicLoadBalancer",
"target": "ClassicHealthMonitor",
"relationship": "HAS_HEALTH_MONITOR"
},
{
"source": "ClassicLoadBalancer",
"target": "ClassicListener",
"relationship": "HAS_LISTENER"
},
{
"source": "ClassicListener",
"target": "ClassicPool",
"relationship": "HAS_DEFAULT_POOL"
},
{
"source": "ClassicPool",
"target": "ClassicHealthMonitor",
"relationship": "HAS_HEALTH_MONITOR"
},
{
"source": "ClassicBlockVolume",
"target": "ClassicServiceResource",
"relationship": "HAS_SERVICE_RESOURCE"
},
{
"source": "ClassicFileVolume",
"target": "ClassicServiceResource",
"relationship": "USES"
},
{
"source": "ClassicServiceResource",
"target": "ClassicDatacenter",
"relationship": "LOCATED_IN"
},
{
"source": "ClassicServiceResource",
"target": "AttributeType",
"relationship": "HAS_ATTRIBUTE"
},
{
"source": "ClassicFirewall",
"target": "ClassicFirewallInterface",
"relationship": "HAS_INTERFACE"
},
{
"source": "ClassicFirewall",
"target": "ClassicFirewallGuestNetworkComponent",
"relationship": "HAS_GUEST_NETWORK_COMPONENT"
},
{
"source": "ClassicFirewallGuestNetworkComponent",
"target": "ClassicGuest",
"relationship": "BELONGS_TO"
},
{
"source": "ClassicGuest",
"target": "Status",
"relationship": "HAS_STATUS"
},
{
"source": "ClassicVirtualServer",
"target": "ClassicBlockDevices",
"relationship": "HAS_BLOCK_DEVICE"
},
{
"source": "ClassicBlockDevices",
"target": "ClassicDiskImage",
"relationship": "HAS_DISK_IMAGE"
},
{
"source": "ClassicVirtualServer",
"target": "ClassicDatacenter",
"relationship": "LOCATED_IN"
},
{
"source": "ClassicDatacenter",
"target": "ClassicLocationStatus",
"relationship": "HAS_STATUS"
},
{
"source": "ClassicNetworkVlan",
"target": "ClassicSubnet",
"relationship": "HAS_SUBNET"
},
{
"source": "ClassicVirtualServer",
"target": "ClassicOperatingSystem",
"relationship": "RUNS_ON"
},
{
"source": "ClassicOperatingSystem",
"target": "ClassicSoftwareLicense",
"relationship": "HAS_LICENSE"
},
{
"source": "ClassicVirtualServer",
"target": "ClassicVirtualServerType",
"relationship": "HAS_TYPE"
},
{
"source": "ClassicSubnet",
"target": "ClassicNetworkVlan",
"relationship": "PART_OF_VLAN"
},
{
"source": "ClassicOperatingSystem",
"target": "ClassicSoftwareLicense",
"relationship": "HAS_LICENSE"
},
{
"source": "ClassicSoftwareComponent",
"target": "ClassicSoftwareLicense",
"relationship": "HAS_LICENSE"
},
{
"source": "ClassicBareMetal",
"target": "ClassicDatacenter",
"relationship": "LOCATED_IN"
},
{
"source": "ClassicDatacenter",
"target": "ClassicLocationStatus",
"relationship": "HAS_STATUS"
},
{
"source": "ClassicBareMetal",
"target": "ClassicSubnet",
"relationship": "USES"
},
{
"source": "ClassicBareMetal",
"target": "ClassicOperatingSystem",
"relationship": "RUNS"
},
{
"source": "ClassicOperatingSystem",
"target": "ClassicSoftwareLicense",
"relationship": "HAS_LICENSE"
},
{
"source": "ClassicBareMetal",
"target": "ClassicSoftwareComponent",
"relationship": "HAS_COMPONENT"
},
{
"source": "ClassicSoftwareComponent",
"target": "ClassicSoftwareLicense",
"relationship": "HAS_LICENSE"
}
],
"Location": {
"ClassicCloud": "items[*]",
"ClassicSubnet": {
"ClassicSubnet": "items[*].resources.datacenters[*].subnets[*]",
"ClassicNetworkVlan": "items.resources.datacenters[*].vlans[*].primarySubnetId",
"ClassicVirtualServer": "items[*].resources.datacenters[*].virtual_servers[*].networkComponents[*].primarySubnet",
"ClassicBareMetal": "items[*].resources.datacenters[*].bare_metal_servers[*].networkComponents[*].primarySubnet"
},
"ClassicDatacenter": {
"ClassicDatacenter": "items[*].resources.datacenters[*]",
"ClassicSubnet": "items[*].resources.datacenters[*].subnets[*].datacenter",
"ClassicNetworkVlan": "items.resources.datacenters[*].vlans[*].primaryRouter.datacenter",
"ClassicLoadBalancer": "items[*].resources.datacenters[*].load_balancers[*].datacenter",
"ClassicFileVolume": "items[*].resources.datacenters[*].file_volumes[*].serviceResource.datacenter",
"ClassicBlockVolume": "items[*].resources.datacenters[*].block_volumes[*].serviceResource.datacenter",
"ClassicVirtualServer": "items[*].resources.datacenters[*].virtual_servers[*].datacenter",
"ClassicBareMetal": "items[*].resources.datacenters[*].bare_metal_servers[*].datacenter"
},
"ClassicLocationStatus": {
"ClassicSubnet": "items[*].resources.datacenters[*].subnets[*].datacenter.locationStatus",
"ClassicNetworkVlan": "items[*].resources.datacenters[*].vlans[*].primaryRouter.datacenter.locationStatus",
"ClassicLoadBalancer": "items[*]resources.datacenters[*].load_balancers[*].datacenter.locationStatus",
"ClassicVirtualServer": "items[*].resources.datacenters[*].virtual_servers[*].datacenter.locationStatus",
"ClassicBareMetal": "items[*].resources.datacenters[*].bare_metal_servers[*].datacenter.locationStatus"
},
"ClassicBareMetal": {
"ClassicBareMetal": "items[*].resources.datacenters[*].bare_metal_servers[*]",
"ClassicSubnet": "items[*].resources.datacenters[*].subnets[*].hardware[*]"
},
"ClassicBareMetalStatus": "items[*].resources.datacenters[*].subnets[*].hardware[*].hardwareStatus",
"ClassicVirtualGuest": "items[*].resources.datacenters[*].subnets[*].virtualGuests[*]",
"ClassicSecurityGroup": "items[*].resources.datacenters[*].security_groups[*]",
"ClassicRule": "items[*].resources.datacenters[*].security_groups[*].rules[*]",
"ClassicNetworkVlan": {
"ClassicNetworkVlan": "items.resources.datacenters[*].vlans[*]",
"ClassicSubnet": "items[*].resources.datacenters[*].subnets[*].networkVlan",
"ClassicVirtualServer": "items[*].resources.datacenters[*].virtual_servers[*].networkComponents[*].networkVlan"
},
"ClassicPrimaryRouter": "items[*].resources.datacenters[*].vlans[*].primaryRouter",
"ClassicPlacementGroup": "items[*].resources.datacenters[*].placement_groups[*]",
"ClassicPlacementGroupRule": "items[*].resources.datacenters[*].placement_groups[*].rule",
"ClassicLoadBalancer": "items[*].resources.datacenters[*].load_balancers[*]",
"ClassicHealthMonitor": "items[*].resources.datacenters[*].load_balancers[*].healthMonitors[*]",
"ClassicListener": "items[*].resources.datacenters[*].load_balancers[*].listeners[*]",
"ClassicPool": "items[*].resources.datacenters[*].load_balancers[*].listeners[*].defaultPool",
"ClassicSSHKey": {
"ClassicSSHKey": "items[*].resources.sshkeys[*]"
},
"ClassicSSL_Certificates": "items[*].resources.ssl_certificates[*]",
"ClassicBlockVolume": "items[*].resources.datacenters[*].block_volumes[*]",
"ClassicServiceResource": {
"ClassicBlockVolume": "items[*].resources.datacenters[*].block_volumes[*].serviceResource",
"ClassicFileVolume": "items[*].resources.datacenters[*].file_volumes[*].serviceResource"
},
"ClassicFileVolume": "items[*].resources.datacenters[*].file_volumes[*]",
"ClassicFirewall": "items[*].resources.datacenters[*].firewalls[*]",
"ClassicFirewallInterface": "items[*].resources.datacenters[*].firewalls[*].firewallInterfaces[*]",
"ClassicFirewallGuestNetworkComponent": "items[*].resources.datacenters[*].firewalls[*].firewallGuestNetworkComponents[*]",
"ClassicGuestNetworkComponent": "items[*].resources.datacenters[*].firewalls[*].firewallGuestNetworkComponents[*].guestNetworkComponent",
"ClassicGuest": "items[*].resources.datacenters[*].firewalls[*].firewallGuestNetworkComponents[*].guestNetworkComponent.guest",
"ClassicPrivateImage": "items[*].resources.private_images[*]",
"ClassicPublicImage": "items[*].resources.public_images[*]",
"ClassicVirtualServer": "items[*].resources.datacenters[*].virtual_servers[*]",
"ClassicBlockDevices": "items[*].resources.datacenters[*].virtual_servers[*].blockDevices[*]",
"ClassicDiskImage": "items[*].resources.datacenters[*].virtual_servers[*].blockDevices[*].diskImage",
"ClassicOperatingSystem": {
"ClassicVirtualServer": "items[*].resources.datacenters[*].virtual_servers[*].operatingSystem",
"ClassicBareMetal": "items[*].resources.datacenters[*].bare_metal_servers[*].operatingSystem"
},
"ClassicSoftwareLicense": {
"ClassicVirtualServer": "items[*].resources.datacenters[*].virtual_servers[*].operatingSystem.softwareLicense",
"ClassicBareMetal": "items[*].resources.datacenters[*].bare_metal_servers[*].softwareComponents[*].softwareLicense"
},
"ClassicVirtualServerStatus": "items[*].resources.datacenters[*].virtual_servers[*].status",
"ClassicVirtualServerType": "items[*].resources.datacenters[*].virtual_servers[*].type",
"ClassicSoftwareComponent": "items[*].resources.datacenters[*].bare_metal_servers[*].softwareComponents[*]"
},
"Attribute": {
"ClassicCloud": [
"id",
"name",
"username"
],
"ClassicSubnet": [
"broadcastAddress",
"cidr",
"gateway",
"id",
"isCustomerOwned",
"isCustomerRoutable",
"modifyDate",
"netmask",
"networkIdentifier",
"networkVlanId",
"sortOrder",
"subnetType",
"totalIpAddresses",
"usableIpAddressCount",
"version",
"ipAddressCount",
"virtualGuests"
],
"ClassicDatacenter": [
"id",
"longName",
"name",
"statusId"
],
"ClassicLocationStatus": [
"id",
"status"
],
"ClassicBareMetal": [
"hostname",
"id",
"manufacturerSerialNumber",
"notes",
"provisionDate",
"serialNumber",
"serviceProviderId",
"serviceProviderResourceId",
"globalIdentifier",
"networkManagementIpAddress",
"primaryBackendIpAddress",
"primaryIpAddress",
"privateIpAddress",
"memoryCapacity",
"processorPhysicalCoreAmount"
],
"ClassicBareMetalStatus": [
"id",
"status"
],
"ClassicVirtualGuest": [
"accountId",
"createDate",
"dedicatedAccountHostOnlyFlag",
"deviceStatusId",
"domain",
"fullyQualifiedDomainName",
"hostname",
"id",
"lastPowerStateId",
"lastVerifiedDate",
"maxCpu",
"maxCpuUnits",
"maxMemory",
"metricPollDate",
"modifyDate",
"placementGroupId",
"provisionDate",
"reclaimDate",
"startCpus",
"statusId",
"typeId",
"uuid",
"globalIdentifier",
"primaryBackendIpAddress",
"primaryIpAddress",
"status"
],
"ClassicSecurityGroup": [
"createDate",
"id",
"modifyDate",
"name",
"description",
"networkComponentBindings"
],
"ClassicRule": [
"createDate",
"direction",
"ethertype",
"id",
"modifyDate",
"portRangeMax",
"portRangeMin",
"protocol",
"remoteGroupId",
"remoteIp",
"securityGroupId"
],
"ClassicNetworkVlan": [
"accountId",
"fullyQualifiedName",
"id",
"modifyDate",
"name",
"primarySubnetId",
"vlanNumber",
"hardwareCount",
"subnetCount",
"virtualGuestCount",
"networkSpace",
"totalPrimaryIpAddressCount",
"firewallInterfaces"
],
"ClassicPrimaryRouter": [
"fullyQualifiedDomainName",
"id"
],
"networkComponents": [
"maxSpeed",
"name",
"port",
"speed",
"primaryIpAddress",
"securityGroupBindings"
],
"SoftwareDescription": [
"longDescription",
"manufacturer",
"name",
"version"
],
"ClassicPlacementGroup": [
"createDate",
"id",
"name"
],
"ClassicPlacementGroupRule": [
"id",
"keyName",
"name"
],
"ClassicLoadBalancer": [
"accountId",
"address",
"createDate",
"id",
"isDataLogEnabled",
"isPublic",
"locationId",
"modifyDate",
"name",
"operatingStatus",
"provisioningStatus",
"type",
"useSystemPublicIpPool",
"uuid",
"l7Pools",
"members",
"sslCiphers"
],
"ClassicHealthMonitor": [
"createDate",
"id",
"interval",
"maxRetries",
"modifyDate",
"monitorType",
"provisioningStatus",
"timeout",
"urlPath",
"uuid"
],
"ClassicListener": [
"clientTimeout",
"connectionLimit",
"createDate",
"id",
"modifyDate",
"protocol",
"protocolPort",
"provisioningStatus",
"serverTimeout",
"tlsCertificateId",
"uuid"
],
"ClassicPool": [
"createDate",
"loadBalancingAlgorithm",
"modifyDate",
"protocol",
"protocolPort",
"provisioningStatus",
"uuid"
],
"ClassicSSHKey": [
"createDate",
"fingerprint",
"id",
"key",
"label",
"modifyDate"
],
"ClassicSSL_Certificates": [
"commonName",
"id",
"validityDays"
],
"ClassicBlockVolume": [
"capacityGb",
"id",
"username",
"activeTransactionCount",
"replicationPartnerCount",
"lunId",
"serviceResourceBackendIpAddress"
],
"ClassicFileVolume": [
"capacityGb",
"id",
"username",
"activeTransactionCount",
"replicationPartnerCount",
"bytesUsed",
"fileNetworkMountAddress",
"serviceResourceBackendIpAddress"
],
"ClassicServiceResource": [
"backendIpAddress",
"id",
"name",
"attributes",
"type"
],
"ClassicFirewall": [
"accountId",
"fullyQualifiedName",
"id",
"modifyDate",
"primarySubnetId",
"vlanNumber",
"dedicatedFirewallFlag",
"highAvailabilityFirewallFlag"
],
"ClassicFirewallInterface": [
"id",
"name"
],
"ClassicFirewallGuestNetworkComponent": [
"guestNetworkComponentId",
"id",
"networkComponentId",
"status"
],
"ClassicGuestNetworkComponent": [
"createDate",
"guestId",
"id",
"macAddress",
"maxSpeed",
"modifyDate",
"name",
"networkId",
"port",
"speed",
"status",
"uuid"
],
"ClassicGuest": [
"accountId",
"createDate",
"dedicatedAccountHostOnlyFlag",
"deviceStatusId",
"domain",
"fullyQualifiedDomainName",
"hostname",
"id",
"lastPowerStateId",
"lastVerifiedDate",
"maxCpu",
"maxCpuUnits",
"maxMemory",
"metricPollDate",
"modifyDate",
"placementGroupId",
"provisionDate",
"reclaimDate",
"startCpus",
"statusId",
"typeId",
"uuid",
"Status"
],
"ClassicPrivateImage": [
"accountId",
"createDate",
"id",
"name",
"parentId",
"globalIdentifier"
],
"ClassicPublicImage": [
"accountId",
"createDate",
"id",
"name",
"parentId",
"globalIdentifier"
],
"ClassicVirtualServer": [
"dedicatedAccountHostOnlyFlag",
"fullyQualifiedDomainName",
"hostname",
"id",
"maxCpu",
"maxCpuUnits",
"maxMemory",
"allowedNetworkStorage",
"networkComponents",
"regionalGroup",
"firewallServiceComponent",
"status",
"type"
],
"ClassicBlockDevices": [
"bootableFlag",
"createDate",
"device",
"diskImageId",
"guestId",
"hotPlugFlag",
"id",
"modifyDate",
"mountMode",
"mountType",
"statusId",
"uuid"
],
"ClassicDiskImage": [
"capacity",
"createDate",
"description",
"id",
"modifyDate",
"name",
"parentId",
"storageRepositoryId",
"typeId",
"units",
"uuid"
],
"ClassicOperatingSystem": [
"hardwareId",
"id",
"manufacturerLicenseInstance",
"softwareLicense"
],
"ClassicSoftwareLicense": [
"id",
"softwareDescriptionId",
"softwareDescription"
],
"ClassicVirtualServerType": [
"id",
"keyName",
"name"
],
"ClassicSoftwareComponent": [
"hardwareId",
"id",
"manufacturerLicenseInstance"
]
}
}
} |
CloudWhisperCustomBot | app/core/config.py | import os
import sys
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS, VectorStore
from loguru import logger
from neo4j import GraphDatabase
from pathlib import Path
from pydantic import AnyHttpUrl, AnyUrl, BaseSettings
from typing import List, Optional
import logging
from app.core.logging import InterceptHandler
class RedisConfig:
REDIS_PARAMS = {
"REDIS_HOST": os.environ.get("REDIS_HOST", "redis"),
"REDIS_PORT": os.environ.get("REDIS_PORT", "6379"),
"REDIS_DB_NUMBER": os.environ.get("REDIS_DB_NUMBER", "0")
}
REDIS_URL = "redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB_NUMBER}".format(
**REDIS_PARAMS
)
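    # With the defaults above this resolves to redis://redis:6379/0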
class OpenAIConfig:
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "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")
class AnthropicConfig:
ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY", "")
class GroqConfig:
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
class AWSConfig:
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY", "")
AWS_SECRET_KEY = os.environ.get("AWS_SECRET_KEY", "")
AWS_REGION = os.environ.get("AWS_REGION", "")
class QdrantConfig:
QDRANT_LOCATION = os.environ.get("QDRANT_LOCATION", "http://qdrant:6333")
QDRANT_API_KEY = os.environ.get("QDRANT_API_KEY", None)
QDRANT_URL = os.environ.get("QDRANT_URL", None)
QDRANT_TIME_OUT = int(os.environ.get("QDRANT_TIME_OUT", 20))
class BaseBotConfig:
X_API_KEY = os.environ.get("X_API_KEY", "")
BASE_BOT_ID = os.environ.get("BASE_BOT_ID", "")
BASE_BOT_URL = os.environ.get(
"BASE_BOT_URL",
"https://stage.wanclouds.ai/v1/whisper/bots/{BASE_BOT_ID}/qna_chats"
).format(BASE_BOT_ID=BASE_BOT_ID)
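    # e.g. https://stage.wanclouds.ai/v1/whisper/bots/<BASE_BOT_ID>/qna_chats once BASE_BOT_ID is set;
    # validate() below fails fast when either value is missing.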
@classmethod
def validate(cls):
if not cls.X_API_KEY:
raise ValueError("X_API_KEY is not set in the environment variables")
if not cls.BASE_BOT_ID:
raise ValueError("BASE_BOT_ID is not set in the environment variables")
class EmailConfig:
# mail settings
MANDRILL_API_KEY = os.environ.get("MANDRILL_API_KEY", "6_CfHuNwVIjZYClUYCDxgQ")
# gmail authentication
MAIL_USERNAME = os.environ.get("MAIL_USERNAME", "noreply@wanclouds.net")
class EncryptionConfig:
SALT_LENGTH = 32
DERIVATION_ROUNDS = 100000
BLOCK_SIZE = 16
KEY_SIZE = 32
SECRET = "nw2FrNshF"
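    # Presumed parameters for an AES/PBKDF2-style helper elsewhere in the app: 32-byte salt,
    # 100k derivation rounds, 16-byte block size, 32-byte (AES-256) key, shared passphrase.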
class APIKeyStatusConfig:
STATUS_VALID = "VALID"
STATUS_INVALID = "INVALID"
class VectorStoreSettings(BaseSettings):
PKL_FILE: str = "/src/cache/vector_store.pkl"
CACHE_DIR: str = "cache"
INDEX_NAME: str = os.getenv("VECTOR_STORE_INDEX_NAME", "intents")
vector_store: Optional[VectorStore] = None
def __init__(self):
super().__init__()
def load_vector_store(self):
vectorstore = FAISS.load_local(
folder_path=self.CACHE_DIR,
embeddings=HuggingFaceEmbeddings(),
index_name=self.INDEX_NAME,
)
return vectorstore
def get_vector_store(self) -> VectorStore:
if not self.vector_store:
return self.load_vector_store()
return self.vector_store
def get_intents_path(self):
current_folder = Path.cwd()
cache_dir = current_folder / 'app/whisper/utils/intents.json'
return cache_dir.resolve().as_posix()
def get_cache_path(self):
current_folder = Path.cwd()
cache_dir = current_folder / "cache"
return cache_dir.resolve().as_posix()
def create_vector_store_index(self):
from app.whisper.utils.json_loader import JSONLoader
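        # Only (re)build the FAISS index when no cached store is found. NOTE: the guard checks
        # PKL_FILE (/src/cache/vector_store.pkl), while save_local() below writes
        # "<cwd>/cache/intents.*"; these paths may not refer to the same file.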
if not os.path.exists(self.PKL_FILE):
try:
loader = JSONLoader(
file_path=self.get_intents_path())
data = loader.load()
embeddings = HuggingFaceEmbeddings()
vector_store = FAISS.from_documents(data, embeddings)
vector_store.save_local(folder_path=self.get_cache_path(), index_name=self.INDEX_NAME)
return vector_store
except Exception as e:
logging.error(f"Error during ingestion: {e}")
class LoggingSettings(BaseSettings):
LOGGING_LEVEL: int = logging.INFO # logging levels are ints
class WebConfig:
AUTH_LINK: AnyUrl = os.environ.get("AUTH_LINK", "https://vpc-stage.wanclouds.net")
BACKEND_URI: AnyUrl = os.environ.get("BACKEND_URI", "https://cloudwhisper-stage.wanclouds.ai")
class DatabaseConfig:
CW_DB_PARAMS = {
"CW_DB_POSTGRES_USER": os.environ.get("POSTGRES_USER", ""),
"CW_DB_POSTGRES_PASSWORD": os.environ.get("POSTGRES_PASSWORD", ""),
"CW_DB_HOST": os.environ.get("ENV_CW_DB_HOST", "postgresdb"),
"CW_DB_PORT": os.environ.get("ENV_CW_DB_PORT", "5432"),
"CW_DB_NAME": os.environ.get("POSTGRES_DB", "cloud_whisper"),
"CW_DB_POSTGRES_CERT": os.environ.get("ENV_CW_DB_CERT", "")
}
POSTGRES_URL = "postgresql+asyncpg://{CW_DB_POSTGRES_USER}:{CW_DB_POSTGRES_PASSWORD}@{CW_DB_HOST}:{CW_DB_PORT}/{" \
"CW_DB_NAME}".format(**CW_DB_PARAMS)
POSTGRES_DB_URL = f"{POSTGRES_URL}?ssl_ca={CW_DB_PARAMS['CW_DB_POSTGRES_CERT']}" if CW_DB_PARAMS.get(
"CW_DB_POSTGRES_CERT") else POSTGRES_URL
class SQLAlchemyConfig(BaseSettings):
SQLALCHEMY_DATABASE_URI: str = DatabaseConfig.POSTGRES_DB_URL
SQLALCHEMY_TRACK_MODIFICATIONS: bool = False
SQLALCHEMY_POOL_RECYCLE: int = int(os.environ.get("SQLALCHEMY_POOL_RECYCLE", "400"))
SQLALCHEMY_POOL_TIMEOUT: int = int(os.environ.get("SQLALCHEMY_POOL_TIMEOUT", "450"))
SQLALCHEMY_POOL_SIZE: int = int(os.environ.get("SQLALCHEMY_POOL_SIZE", "5"))
SQLALCHEMY_MAX_OVERFLOW: int = int(os.environ.get("SQLALCHEMY_MAX_OVERFLOW", "0"))
SQLALCHEMY_ENGINE_OPTIONS: dict = {
"pool_recycle": SQLALCHEMY_POOL_RECYCLE,
"pool_timeout": SQLALCHEMY_POOL_TIMEOUT,
"pool_size": SQLALCHEMY_POOL_SIZE,
"max_overflow": SQLALCHEMY_MAX_OVERFLOW
}
class PaginationConfig:
DEFAULT_LIMIT = 10
MAX_PAGE_LIMIT = 50
class Settings(BaseSettings):
URL_PREFIX: str = "/v1/whisper"
OPENAPI_URL: str = f"{URL_PREFIX}/openapi.json"
DOCS_URL: str = URL_PREFIX + "/docs"
BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = [
"http://localhost:3000",
"http://localhost:8008", # type: ignore
]
web: WebConfig = WebConfig()
db: SQLAlchemyConfig = SQLAlchemyConfig()
logging: LoggingSettings = LoggingSettings()
redis: RedisConfig = RedisConfig()
openai: OpenAIConfig = OpenAIConfig()
anthropic: AnthropicConfig = AnthropicConfig()
groq: GroqConfig = GroqConfig()
aws: AWSConfig = AWSConfig()
qdrant: QdrantConfig = QdrantConfig()
base_bot: BaseBotConfig = BaseBotConfig()
email: EmailConfig = EmailConfig()
encryption: EncryptionConfig = EncryptionConfig()
api_key_status: APIKeyStatusConfig = APIKeyStatusConfig()
pagination_config: PaginationConfig = PaginationConfig()
vector_store: VectorStoreSettings = VectorStoreSettings()
def __init__(self):
super().__init__()
def setup_app_logging(config: Settings) -> None:
"""Prepare custom logging for our application."""
LOGGERS = ("uvicorn.asgi", "uvicorn.access")
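    # Route the root logger and uvicorn's loggers through loguru via InterceptHandler,
    # then point loguru's own sink at stderr at the configured level.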
logging.getLogger().handlers = [InterceptHandler()]
for logger_name in LOGGERS:
logging_logger = logging.getLogger(logger_name)
logging_logger.handlers = [InterceptHandler(level=config.logging.LOGGING_LEVEL)]
logger.configure(
handlers=[{"sink": sys.stderr, "level": config.logging.LOGGING_LEVEL}]
)
def neo4j_driver():
url = os.getenv("NEO4J_URI", "bolt://neo4j:7687")
user = os.getenv("NEO4J_USERNAME", "neo4j")
password = os.getenv("NEO4J_PASSWORD", "72054321")
logger.info(f"Connecting to {url}")
driver = GraphDatabase.driver(url, auth=(user, password))
return driver
settings = Settings()
neo4j_driver = neo4j_driver()
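# `settings` and `neo4j_driver` are module-level singletons; note the driver instance shadows
# the factory function above. Typical (illustrative) usage elsewhere in the app:
#   from app.core.config import settings, neo4j_driver
#   with neo4j_driver.session() as session:
#       session.run("MATCH (n) RETURN count(n) AS total")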
|
CloudWhisperCustomBot | app/core/logging.py | import logging
from types import FrameType
from typing import cast
from loguru import logger
class InterceptHandler(logging.Handler):
def emit(self, record: logging.LogRecord) -> None: # pragma: no cover
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = str(record.levelno)
        # Find the caller from which the logged message originated
        frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__: # noqa: WPS609
frame = cast(FrameType, frame.f_back)
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level,
record.getMessage(),
)
|
CloudWhisperCustomBot | migrations/script.py.mako | """${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
${upgrades if upgrades else "pass"}
def downgrade() -> None:
${downgrades if downgrades else "pass"}
|
CloudWhisperCustomBot | migrations/README | Generic single-database configuration with an async dbapi.
|
CloudWhisperCustomBot | migrations/env.py | import asyncio
from logging.config import fileConfig
from sqlalchemy import pool
from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import async_engine_from_config
from alembic import context
from app.models.base import Base
from app.core.config import settings
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata
config.set_main_option('sqlalchemy.url', settings.db.SQLALCHEMY_DATABASE_URI)
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def do_run_migrations(connection: Connection) -> None:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
async def run_async_migrations() -> None:
"""In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = async_engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
async with connectable.connect() as connection:
await connection.run_sync(do_run_migrations)
await connectable.dispose()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode."""
asyncio.run(run_async_migrations())
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
CloudWhisperCustomBot | migrations/versions/4a431d838915_.py | """empty message
Revision ID: 4a431d838915
Revises: 933e5e67d032
Create Date: 2024-09-03 04:59:18.509767
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision: str = '4a431d838915'
down_revision: Union[str, None] = '933e5e67d032'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('messages', sa.Column('json_metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('messages', 'json_metadata')
# ### end Alembic commands ###
|
CloudWhisperCustomBot | migrations/versions/325ce6b01196_.py | """empty message
Revision ID: 325ce6b01196
Revises: cbe4893ba969
Create Date: 2024-08-24 18:55:16.284670
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision: str = '325ce6b01196'
down_revision: Union[str, None] = 'cbe4893ba969'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('chats', sa.Column('json_metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('chats', 'json_metadata')
# ### end Alembic commands ### |
CloudWhisperCustomBot | migrations/versions/14e9d53952a9_.py | """empty message
Revision ID: 14e9d53952a9
Revises: 6408613d0565
Create Date: 2024-11-26 10:36:46.527741
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '14e9d53952a9'
down_revision: Union[str, None] = '6408613d0565'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('activity_tracking', sa.Column('is_polled', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('activity_tracking', 'is_polled')
# ### end Alembic commands ###
|