import json
import os
import time

import gradio as gr
import openai
import requests
from openai import OpenAI

from utils import ai_audit_analysis_categories, get_system_prompt, ANALYSIS_TYPES

requests.adapters.DEFAULT_TIMEOUT = 60
# Create Global Variables
# Read the OpenAI API key from the environment instead of hardcoding it in the source
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Module-level state shared by the chatbot callbacks
bot_last_message = ""
complete_chat_history = []
# /////////////////// *****************************///////////////// Utility Functions
#region Utility Functions
# Function to update the OpenAI API key
def update_api_key(new_api_key):
    global client
    if new_api_key.strip() != "":
        client = OpenAI(api_key=new_api_key)
        return "API Key updated successfully"
    return "API Key cannot be empty"
def load_chatboat_last_message():
return bot_last_message
def load_chatboat_complet_history():
complete_text = ""
for turn in complete_chat_history:
user_message, bot_message = turn
complete_text = f"{complete_text}\nUser: {user_message}\nAssistant: {bot_message}"
return complete_text
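# Example: with complete_chat_history = [("Hi", "Hello there!")], this returns
# "\nUser: Hi\nAssistant: Hello there!" (note the leading newline from the first turn).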
def format_json_result_to_html(result):
    formatted_result = ""
    for key, value in result.items():
        if isinstance(value, list):
            formatted_result += f"{key.title()}:<br>" + "<br>".join(value) + "<br><br>"
        else:
            formatted_result += f"{key.title()}: {value}<br>"
    return formatted_result.strip()
def format_json_result(result):
formatted_result = ""
for key, value in result.items():
if isinstance(value, list):
formatted_result += f"{key.title()}:\n" + "\n".join(value) + "\n\n"
else:
formatted_result += f"{key.title()}: {value}\n"
return formatted_result.strip()
# Function to dynamically format the JSON result into Markdown format
def format_result_to_markdown(result):
formatted_result = ""
for key, value in result.items():
formatted_result += f"**{key.title()}**: "
if isinstance(value, list):
formatted_result += "\n" + "\n".join(f"- {item}" for item in value) + "\n\n"
else:
formatted_result += f"{value}\n\n"
return formatted_result.strip()
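# Usage sketch (hypothetical input): format_result_to_markdown(
#     {"sentiment": "Positive", "key points": ["Clear tone", "Polite replies"]})
# returns:
#   **Sentiment**: Positive
#
#   **Key Points**:
#   - Clear tone
#   - Polite replies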
#endregion
# /////////////////// *****************************///////////////// Conversation with OpenAI Chatbot
#region Conversation with OpenAI Chatbot
# A normal call to the OpenAI chat completions API
def chat(system_prompt, user_prompt, model='gpt-3.5-turbo', temperature=0):
    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ],
        model=model,
        temperature=temperature,
    )
    res = response.choices[0].message.content
    return res
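# Usage sketch (hypothetical prompts): chat("You are a helpful assistant.", "Say hi")
# returns the assistant's reply as a plain string, e.g. "Hi! How can I help you today?".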
# Format the prompt from the chat_history so that it looks good in the UI
def format_chat_prompt(message, chat_history, max_convo_length):
prompt = ""
for turn in chat_history[-max_convo_length:]:
user_message, bot_message = turn
prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
prompt = f"{prompt}\nUser: {message}\nAssistant:"
return prompt
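# Example: format_chat_prompt("How are you?", [("Hi", "Hello!")], 10) produces
# "\nUser: Hi\nAssistant: Hello!\nUser: How are you?\nAssistant:"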
# This function gets a message from the user, passes it to ChatGPT, and returns the output
def get_response_from_chatboat(message, chat_history, max_convo_length=10):
    global bot_last_message, complete_chat_history
    formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
    bot_message = chat(system_prompt='You are a friendly chatbot. Generate output only for the Assistant.', user_prompt=formatted_prompt)
    chat_history.append((message, bot_message))
    complete_chat_history.append((message, bot_message))
    bot_last_message = bot_message
    return "", chat_history
#endregion
def analyse_current_conversation(text, analysis_type):
    try:
        if ANALYSIS_TYPES.get(analysis_type) is None:
            return f"Analysis type {analysis_type} is not implemented yet, please choose another category"
        if not text:
            return f"No text provided to analyze for {analysis_type}, please provide text or load it from the chatbot history"
        word_count = len(text.split())
        if word_count < 20:
            return f"The text is too short to analyze for {analysis_type}, please provide a longer text"
        system_prompt = get_system_prompt(analysis_type)
        text_to_analyze = text
        response = client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": text_to_analyze}
            ],
            model="gpt-3.5-turbo",
        )
        analysis_result = response.choices[0].message.content
        print(analysis_result)
        # Parse the result, handle JSON parsing errors
        try:
            parsed_result = json.loads(analysis_result)
        except json.JSONDecodeError:
            return "Failed to parse the analysis result. Please check the format of the returned data."
        formatted_json = format_result_to_markdown(parsed_result)
        return formatted_json
    except KeyError as e:
        return f"Key error occurred: {e}. Please check your keys."
    except Exception as e:
        # Check if the error message is related to the API key
        if 'API key' in str(e):
            return "OpenAI API key error: Please verify your API key."
        else:
            return f"An unexpected error occurred: {e}. Please check your implementation."
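# Usage sketch (assumes get_system_prompt(analysis_type) instructs the model to answer
# in JSON, e.g. {"sentiment": "Positive", "key points": [...]}): calling
#   analyse_current_conversation(load_chatboat_complet_history(), "Sentiment Analysis")
# returns that JSON rendered as markdown, or a human-readable error message string.
# The "Sentiment Analysis" category name here is only illustrative.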
#region UI Related Functions
def update_dropdown(main_category):
# Get the subcategories based on the selected main category
subcategories = ai_audit_analysis_categories.get(main_category, [])
print(subcategories)
return gr.Dropdown(choices=subcategories, value=subcategories[0] if subcategories else None)
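# Example (illustrative category data): if ai_audit_analysis_categories were
# {"Sentiment": ["Polarity", "Emotion"]}, update_dropdown("Sentiment") returns a
# gr.Dropdown with choices ["Polarity", "Emotion"] and value "Polarity".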
def update_analysis_type(subcategory):
    # Placeholder callback; currently just logs the selected subcategory
    print(subcategory)
#endregion
with gr.Blocks() as demo:
gr.Markdown("