# GRC_framework / app.py
import os
import json

import requests
import gradio as gr
from openai import OpenAI

from utils import ai_audit_analysis_categories, get_system_prompt, ANALYSIS_TYPES

requests.adapters.DEFAULT_TIMEOUT = 60
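# `utils` is expected to provide:
#   - ai_audit_analysis_categories: dict mapping each main audit category to a list of its subcategories
#   - ANALYSIS_TYPES: dict of the supported analysis types
#   - get_system_prompt(analysis_type): the system prompt string for the given analysis type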
# Global state shared across the UI callbacks.
# The OpenAI API key is read from the environment and can be replaced at runtime via the UI.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", ""))
bot_last_message = ""
complete_chat_history = []
# /////////////////// *****************************///////////////// Utility Functions
#region Utility Functions
# Update the OpenAI API key used by the client
def update_api_key(new_api_key):
global client
if new_api_key.strip() != "":
client = OpenAI(api_key=new_api_key)
return "API Key updated successfully"
def load_chatboat_last_message():
return bot_last_message
def load_chatboat_complet_history():
complete_text = ""
for turn in complete_chat_history:
user_message, bot_message = turn
complete_text = f"{complete_text}\nUser: {user_message}\nAssistant: {bot_message}"
return complete_text
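# Formatters that turn the JSON analysis result into HTML, plain text, or Markdown.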
def format_json_result_to_html(result):
formatted_result = ""
for key, value in result.items():
if isinstance(value, list):
formatted_result += f"<strong>{key.title()}:</strong><br>" + "<br>".join(value) + "<br><br>"
else:
formatted_result += f"<strong>{key.title()}:</strong> {value}<br>"
return formatted_result.strip()
def format_json_result(result):
formatted_result = ""
for key, value in result.items():
if isinstance(value, list):
formatted_result += f"{key.title()}:\n" + "\n".join(value) + "\n\n"
else:
formatted_result += f"{key.title()}: {value}\n"
return formatted_result.strip()
# Function to dynamically format the JSON result into Markdown format
def format_result_to_markdown(result):
formatted_result = ""
for key, value in result.items():
formatted_result += f"**{key.title()}**: "
if isinstance(value, list):
formatted_result += "\n" + "\n".join(f"- {item}" for item in value) + "\n\n"
else:
formatted_result += f"{value}\n\n"
return formatted_result.strip()
#endregion
# /////////////////// *****************************///////////////// Conversation with the OpenAI Chatbot
#region Conversation with the OpenAI Chatbot
# A plain call to the OpenAI chat completions API
def chat(system_prompt, user_prompt, model='gpt-3.5-turbo', temperature=0):
    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ],
        model=model,
        temperature=temperature,
    )
    return response.choices[0].message.content
# Format the prompt from the chat history so that it reads well in the UI
def format_chat_prompt(message, chat_history, max_convo_length):
prompt = ""
for turn in chat_history[-max_convo_length:]:
user_message, bot_message = turn
prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
prompt = f"{prompt}\nUser: {message}\nAssistant:"
return prompt
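# Example of the flattened prompt produced above (one prior turn plus the new message):
#   User: Hello
#   Assistant: Hi, how can I help?
#   User: <new message>
#   Assistant: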
# Take a message from the user, pass it (with recent history) to the OpenAI API, and return the updated chat history
def get_response_from_chatboat(message, chat_history, max_convo_length=10):
global bot_last_message, complete_chat_history
formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
bot_message = chat(system_prompt='You are a friendly chatbot. Generate the output for only the Assistant.',user_prompt=formatted_prompt)
chat_history.append((message, bot_message))
complete_chat_history.append((message, bot_message))
bot_last_message = bot_message
return "", chat_history
#endregion
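# Analyse the given text with the selected analysis type and return the result formatted as Markdown.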
def analyse_current_conversation(text, analysis_type):
try:
        if ANALYSIS_TYPES.get(analysis_type) is None:
            return f"Analysis type {analysis_type} is not implemented yet, please choose another category"
        if not text:
            return f"No text provided to analyze for {analysis_type}, please provide text or load it from the chatbot history"
        word_count = len(text.split())
        if word_count < 20:
            return f"The text is too short to analyze for {analysis_type}, please provide a longer text"
system_prompt = get_system_prompt(analysis_type)
text_to_analyze = text
response = client.chat.completions.create(
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": text_to_analyze}
],
model="gpt-3.5-turbo",
)
analysis_result = response.choices[0].message.content
print(analysis_result)
# Parse the result, handle JSON parsing errors
try:
parsed_result = json.loads(analysis_result)
except json.JSONDecodeError:
return "Failed to parse the analysis result. Please check the format of the returned data."
formatted_json = format_result_to_markdown(parsed_result)
return formatted_json
except KeyError as e:
return f"Key error occurred: {e}. Please check your keys."
except Exception as e:
# Check if the error message is related to the API key
if 'API key' in str(e):
return "OpenAI API key error: Please verify your API key."
else:
return f"An unexpected error occurred: {e}. Please check your implementation."
#region UI Related Functions
def update_dropdown(main_category):
# Get the subcategories based on the selected main category
subcategories = ai_audit_analysis_categories.get(main_category, [])
print(subcategories)
return gr.Dropdown(choices=subcategories, value=subcategories[0] if subcategories else None)
def update_analysis_type(subcategory):
    # Placeholder change-handler; currently just logs the selected subcategory.
    print(subcategory)
#endregion
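# Gradio UI: an API-key field plus two tabs, "Prompt Testing" (chatbot) and "Prompt Assessment" (analysis).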
with gr.Blocks() as demo:
gr.Markdown("<center><img src='https://huggingface.co/spaces/abdulnim/GRC_framework/resolve/main/logo.png' alt='Align X' width='150'/></center>")
# Add a text field for the API key
    api_key_field = gr.Textbox(label="Enter your OpenAI API key")
update_api_key_btn = gr.Button("Update API Key")
update_api_key_btn.click(update_api_key, inputs=[api_key_field], outputs=[])
# gr.Markdown("# AI Audit and GRC Framework!")
gr.Markdown("# AlignXX Demo")
with gr.Tabs():
with gr.TabItem("Prompt Testing"):
gr.Markdown("## Prompt Testing")
chatbot = gr.Chatbot(height=600)
msg = gr.Textbox(label="Write something for the chatbot here")
clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")
submit_btn = gr.Button("Submit")
submit_btn.click(get_response_from_chatboat, inputs=[msg, chatbot], outputs=[msg, chatbot])
msg.submit(get_response_from_chatboat, inputs=[msg, chatbot], outputs=[msg, chatbot])
with gr.TabItem("Prompt Assessment"):
gr.Markdown("## Prompt Assessment")
gr.Markdown("Load your chatbot text or write your own to and analyze it")
text_field = gr.Textbox(label="Text to Process", interactive=True, lines=2)
# Radio button and dropdown list
initial_main_category = next(iter(ai_audit_analysis_categories))
initial_sub_categories = ai_audit_analysis_categories[initial_main_category]
main_category_radio = gr.Radio(list(ai_audit_analysis_categories.keys()), label="Main Audit Categories", value=initial_main_category)
sub_category_dropdown = gr.Dropdown(choices=initial_sub_categories, label="Sub Categories", value=initial_sub_categories[0])
# Update the dropdown based on the radio selection
            main_category_radio.change(fn=update_dropdown, inputs=main_category_radio, outputs=sub_category_dropdown)
sub_category_dropdown.change(fn=update_analysis_type, inputs=sub_category_dropdown)
load_last_message_btn = gr.Button("Load Last Message")
load_complete_conv_btn = gr.Button("Load Complete Chat History")
process_btn = gr.Button("Process")
# analysis_result = gr.Label()
analysis_result = gr.Markdown()
load_last_message_btn.click(load_chatboat_last_message, inputs=[], outputs=text_field)
load_complete_conv_btn.click(load_chatboat_complet_history, inputs=[], outputs=text_field)
process_btn.click(analyse_current_conversation, inputs=[text_field, sub_category_dropdown], outputs=analysis_result)
demo.launch(share=True)