Spaces:
Sleeping
Sleeping
adding app to huggingface space
Browse files
README.md
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
---
|
2 |
-
title: GRC Framework
|
3 |
emoji: π
|
4 |
colorFrom: yellow
|
5 |
colorTo: purple
|
@@ -10,4 +10,14 @@ pinned: false
|
|
10 |
license: apache-2.0
|
11 |
---
|
12 |
|
13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
title: AI Analysis and GRC Framework Application
|
3 |
emoji: π
|
4 |
colorFrom: yellow
|
5 |
colorTo: purple
|
|
|
10 |
license: apache-2.0
|
11 |
---
|
12 |
|
13 |
+
## Introduction
|
14 |
+
This project presents a comprehensive AI analysis tool integrated with the ChatGPT API, focusing on various aspects such as AI Audit, Sentiment Analysis, Toxicity Detection, Emotion Detection, and more. The application is designed to assist in Governance, Risk Management, and Compliance (GRC) frameworks, providing insightful AI-driven analysis.
|
15 |
+
|
16 |
+
## Features
|
17 |
+
- **AI Audit Analysis**: In-depth auditing capabilities for AI systems.
|
18 |
+
- **Sentiment Analysis**: Evaluates sentiments with confidence scores and text snippets.
|
19 |
+
- **Toxicity Detection**: Identifies and assesses toxic content.
|
20 |
+
- **Emotion Detection**: Analyzes and reports on the emotional tone of text.
|
21 |
+
- Additional analysis categories covering a wide range of AI applications.
|
22 |
+
- Interactive web interface using Gradio.
|
23 |
+
- Integration with OpenAI's ChatGPT API for robust AI analysis.
|
app.py
ADDED
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import os
|
3 |
+
import requests
|
4 |
+
import gradio as gr
|
5 |
+
requests.adapters.DEFAULT_TIMEOUT = 60
|
6 |
+
import time
|
7 |
+
import openai
|
8 |
+
from openai import OpenAI
|
9 |
+
from utils import ai_audit_analysis_categories, get_system_prompt, ANALYSIS_TYPES
|
10 |
+
import json
|
11 |
+
|
12 |
+
|
13 |
+
# Create Global Variables
# SECURITY: never hard-code API keys in source control. The key is read from
# the environment here and can be replaced at runtime via update_api_key().
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", ""))

# Chat state shared across the Gradio callbacks below.
# bot_last_message: the assistant's most recent reply.
# complete_chat_history: every (user, assistant) turn since startup.
bot_last_message = ""
complete_chat_history = []
+
|
21 |
+
# /////////////////// *****************************///////////////// Utility Functions
|
22 |
+
#region Utility Functions
|
23 |
+
|
24 |
+
# Function to update OpenAPI key the API key
def update_api_key(new_api_key):
    """Replace the module-level OpenAI client with one using *new_api_key*.

    Args:
        new_api_key: The API key string entered in the UI; ignored when blank.

    Returns:
        A short status message suitable for display in the UI.
    """
    global client
    if new_api_key.strip() != "":
        client = OpenAI(api_key=new_api_key)
        return "API Key updated successfully"
    # Bug fix: previously reported success even when the key was blank and
    # nothing was updated.
    return "API Key not updated: the key field is empty"
+
|
31 |
+
|
32 |
+
def load_chatboat_last_message():
    """Return the chatbot's most recent reply (empty string before any chat)."""
    return bot_last_message
34 |
+
|
35 |
+
def load_chatboat_complet_history():
    """Render the whole conversation as one "User: .../Assistant: ..." transcript."""
    segments = []
    for user_text, assistant_text in complete_chat_history:
        segments.append(f"\nUser: {user_text}\nAssistant: {assistant_text}")
    return "".join(segments)
41 |
+
|
42 |
+
|
43 |
+
def format_json_result_to_html(result):
    """Render a parsed analysis dict as simple HTML with bolded keys.

    List values become <br>-separated items; scalars are shown inline.
    """
    parts = []
    for field, content in result.items():
        label = f"<strong>{field.title()}:</strong>"
        if isinstance(content, list):
            parts.append(label + "<br>" + "<br>".join(content) + "<br><br>")
        else:
            parts.append(f"{label} {content}<br>")
    return "".join(parts).strip()
51 |
+
|
52 |
+
def format_json_result(result):
    """Render a parsed analysis dict as plain text, one "Key: value" per entry.

    List values are placed on their own lines under the key.
    """
    pieces = []
    for field, content in result.items():
        if isinstance(content, list):
            pieces.append(f"{field.title()}:\n" + "\n".join(content) + "\n\n")
        else:
            pieces.append(f"{field.title()}: {content}\n")
    return "".join(pieces).strip()
60 |
+
|
61 |
+
# Render a parsed JSON analysis result as Markdown for the gr.Markdown widget.
def format_result_to_markdown(result):
    """Format each key as a bold heading; list values become bullet lists."""
    chunks = []
    for field, content in result.items():
        header = f"**{field.title()}**: "
        if isinstance(content, list):
            bullets = "\n".join(f"- {entry}" for entry in content)
            chunks.append(header + "\n" + bullets + "\n\n")
        else:
            chunks.append(f"{header}{content}\n\n")
    return "".join(chunks).strip()
71 |
+
|
72 |
+
#endregion
|
73 |
+
|
74 |
+
|
75 |
+
|
76 |
+
|
77 |
+
|
78 |
+
|
79 |
+
# /////////////////// *****************************///////////////// Conversation with Open Ai Chatboat
|
80 |
+
#region Conversation with Open Ai Chatboat
|
81 |
+
# A Normal call to OpenAI API '''
|
82 |
+
def chat(system_prompt, user_prompt, model='gpt-3.5-turbo', temperature=0):
    """Send one system+user prompt pair to the OpenAI chat completions API.

    Args:
        system_prompt: Instructions describing the assistant's role.
        user_prompt: The user message (may embed prior conversation turns).
        model: Chat model name. Bug fix: previously accepted but ignored in
            favour of a hard-coded "gpt-3.5-turbo".
        temperature: Sampling temperature. Bug fix: previously accepted but
            never forwarded to the API call.

    Returns:
        The assistant's reply text.
    """
    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        model=model,
        temperature=temperature,
    )
    return response.choices[0].message.content
93 |
+
|
94 |
+
# Serialize the recent history plus the new user message into a single prompt
# string in "User:/Assistant:" transcript form.
def format_chat_prompt(message, chat_history, max_convo_length):
    """Build the model prompt from the last *max_convo_length* turns and *message*."""
    segments = []
    for user_text, assistant_text in chat_history[-max_convo_length:]:
        segments.append(f"\nUser: {user_text}\nAssistant: {assistant_text}")
    segments.append(f"\nUser: {message}\nAssistant:")
    return "".join(segments)
102 |
+
|
103 |
+
|
104 |
+
# Gradio callback: take the user's message, query ChatGPT, and record the turn.
def get_response_from_chatboat(message, chat_history, max_convo_length=10):
    """Handle one chatbot turn and append it to both histories.

    Returns an empty string (clears the input textbox) and the updated
    chat_history for the Chatbot widget.
    """
    global bot_last_message, complete_chat_history
    prompt = format_chat_prompt(message, chat_history, max_convo_length)
    reply = chat(
        system_prompt='You are a friendly chatbot. Generate the output for only the Assistant.',
        user_prompt=prompt,
    )

    chat_history.append((message, reply))
    complete_chat_history.append((message, reply))
    bot_last_message = reply
    return "", chat_history
114 |
+
|
115 |
+
#endregion
|
116 |
+
|
117 |
+
|
118 |
+
|
119 |
+
def analyse_current_conversation(text, analysis_type):
    """Run the selected analysis on *text* via the OpenAI API.

    Args:
        text: The text to analyse (typed in, or loaded from chat history).
        analysis_type: Key into ANALYSIS_TYPES selecting the analysis prompt.

    Returns:
        A Markdown-formatted analysis string, or a human-readable message
        when the input or the model response cannot be processed.
    """
    if ANALYSIS_TYPES.get(analysis_type, None) is None:
        return f"Analysis type {analysis_type} is not implemented yet, please choose another category"

    if not text:
        return f"No text provided to analyze for {analysis_type}, please provide text or load from chatboat history"

    # Very short inputs give unreliable analyses; require a minimum length.
    word_count = len(text.split())
    if word_count < 20:
        return f" The text is too short to analyze for {analysis_type}, please provide a large text"

    system_prompt = get_system_prompt(analysis_type)

    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": text},
        ],
        model="gpt-3.5-turbo",
    )
    analysis_result = response.choices[0].message.content

    # Bug fix: the model does not always return valid JSON; previously a
    # malformed response raised an unhandled exception inside the Gradio
    # callback. Fall back to showing the raw model output instead.
    try:
        parsed_result = json.loads(analysis_result)
    except (json.JSONDecodeError, TypeError):
        return f"Could not parse the analysis response as JSON; raw output:\n\n{analysis_result}"

    return format_result_to_markdown(parsed_result)
152 |
+
|
153 |
+
|
154 |
+
|
155 |
+
|
156 |
+
|
157 |
+
#region UI Related Functions
|
158 |
+
|
159 |
+
def update_dropdown(main_category):
    """Refresh the sub-category dropdown when the main category radio changes.

    Args:
        main_category: The selected key of ai_audit_analysis_categories.

    Returns:
        A gr.Dropdown update whose choices are the sub-categories of the
        selected main category, defaulting to the first entry.
    """
    # Cleanup: removed a leftover debug print of the subcategories.
    subcategories = ai_audit_analysis_categories.get(main_category, [])
    return gr.Dropdown(choices=subcategories, value=subcategories[0] if subcategories else None)
164 |
+
|
165 |
+
|
166 |
+
def update_analysis_type(subcategory):
    """Placeholder callback for sub-category selection changes.

    Bug fix: the original body was a stray ``pass`` followed by a leftover
    debug ``print`` of the selected subcategory; the callback intentionally
    does nothing yet and returns None.
    """
    return None
169 |
+
|
170 |
+
|
171 |
+
|
172 |
+
#endregion
|
173 |
+
|
174 |
+
|
175 |
+
|
176 |
+
# Top-level Gradio application: an API-key field plus two tabs — a chatbot
# and an analysis view that feeds text into analyse_current_conversation.
with gr.Blocks() as demo:

    # Add a text field for the API key
    api_key_field = gr.Textbox(label="Enter your Chatgpt OpenAI API Key")
    update_api_key_btn = gr.Button("Update API Key")
    # outputs=[] discards update_api_key's status message.
    update_api_key_btn.click(update_api_key, inputs=[api_key_field], outputs=[])

    gr.Markdown("# AI Audit and GRC Framework!")

    with gr.Tabs():
        with gr.TabItem("AI Chatbot"):
            gr.Markdown("## AI Chatbot")
            chatbot = gr.Chatbot(height=600)
            msg = gr.Textbox(label="Write something for the chatbot here")
            clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")
            submit_btn = gr.Button("Submit")
            # Both the button and Enter in the textbox send the message; the
            # callback returns ("", history) so the textbox is cleared.
            submit_btn.click(get_response_from_chatboat, inputs=[msg, chatbot], outputs=[msg, chatbot])
            msg.submit(get_response_from_chatboat, inputs=[msg, chatbot], outputs=[msg, chatbot])

        with gr.TabItem("AI Analysis"):
            gr.Markdown("## AI Analysis")
            gr.Markdown("Load your chatbot text or write your own to and analyze it")
            text_field = gr.Textbox(label="Text to Process", interactive=True, lines=2)

            # Radio button and dropdown list
            initial_main_category = next(iter(ai_audit_analysis_categories))
            initial_sub_categories = ai_audit_analysis_categories[initial_main_category]

            main_category_radio = gr.Radio(list(ai_audit_analysis_categories.keys()), label="Main Audit Categories", value=initial_main_category)
            sub_category_dropdown = gr.Dropdown(choices=initial_sub_categories, label="Sub Categories", value=initial_sub_categories[0])
            # Update the dropdown based on the radio selection
            main_category_radio.change(fn=update_dropdown, inputs= main_category_radio, outputs=sub_category_dropdown)
            sub_category_dropdown.change(fn=update_analysis_type, inputs=sub_category_dropdown)

            # Pull text from the chatbot tab's state into the analysis textbox.
            load_last_message_btn = gr.Button("Load Last Message")
            load_complete_conv_btn = gr.Button("Load Complete Chat History")
            process_btn = gr.Button("Process")
            # analysis_result = gr.Label()
            analysis_result = gr.Markdown()
            load_last_message_btn.click(load_chatboat_last_message, inputs=[], outputs=text_field)
            load_complete_conv_btn.click(load_chatboat_complet_history, inputs=[], outputs=text_field)
            process_btn.click(analyse_current_conversation, inputs=[text_field, sub_category_dropdown], outputs=analysis_result)

# NOTE(review): share=True is unnecessary when hosted on HF Spaces (the Space
# itself is the public URL) — confirm whether it can be dropped.
demo.launch(share=True)
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
requests
|
2 |
+
gradio
|
3 |
+
openai
|
utils.py
ADDED
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
|
3 |
+
# Main audit categories and their sub-categories. Drives the "Main Audit
# Categories" radio and the dependent "Sub Categories" dropdown in app.py;
# the "AI Audit" entries double as keys into ANALYSIS_TYPES.
ai_audit_analysis_categories = {
    "AI Audit": [
        "sentiment_analysis",
        "emotion_detection",
        "political_bias_detection",
        "stress_level_detection",
        "empathy_level_assessment",
        "mood_detection",
        "toxicity_detection"
    ],

    "GDPR": [
        "Data Handling and Processing",
        "Consent and Transparency",
        "Data Security",
        "Environmental Impact"],

    "Toxicity": [
        "Content Moderation",
        "Reporting Mechanism",
        "Content guidelines",
        "User Education"],

    "Legal": [
        "Privacy Policy",
        "Data Retention",
        "Consent Mechanism",
        "GDPR Compliance"],

    "Context": [
        "Ethical AI",
        # NOTE(review): "Bais Mitigation" / "Fairness Assestment" look like
        # typos for "Bias Mitigation" / "Fairness Assessment", but they are
        # runtime values shown in the UI — confirm before renaming.
        "Bais Mitigation",
        "Fairness Assestment",
        "Explainability"],

    "Governance": [
        "Model development",
        "Data Quality",
        "Bais Mitigation",
        # Bug fix: missing commas here previously merged the next three
        # entries, via implicit string concatenation, into the single string
        # "Fairness AssestmentExplainabilityUser Input".
        "Fairness Assestment",
        "Explainability",
        "User Input"],

    "RiskManagement": [
        "Corporate Ethics",
        "Board Management",
        "Stakeholder Engagement",
        "Risk Management"],

    "Robustness": [
        "System Reliability",
        "Quality Assurance", "Stress Testing",
        "Fail-Safe Procedures"],

    "Sustainability": [
        "Renewable Resources",
        "Waste Reduction",
        "Energy Efficiency",
        "Sustainable Practices"]
}
63 |
+
|
64 |
+
|
65 |
+
|
66 |
+
|
67 |
+
|
68 |
+
|
69 |
+
# Define a standard template for prompts
# NOTE(review): this template appears unused within this module —
# get_system_prompt builds its prompt inline instead. Confirm no external
# caller relies on it before removing.
STANDARD_PROMPT_TEMPLATE = "You are a data analysis assistant capable of {analysis_type} analysis. {specific_instruction} Respond with your analysis in JSON format. The JSON schema should include '{json_schema}'."
71 |
+
|
72 |
+
|
73 |
+
|
74 |
+
|
75 |
+
def get_system_prompt(analysis_type: str) -> str:
    """Build the system prompt instructing the model to answer in JSON.

    Args:
        analysis_type: Key into ANALYSIS_TYPES and JSON_SCHEMAS; unknown
            keys fall back to a generic instruction and an empty schema.

    Returns:
        The complete system prompt string.
    """
    specific_instruction = ANALYSIS_TYPES.get(analysis_type, "Perform the analysis as per the specified type.")
    json_schema = JSON_SCHEMAS.get(analysis_type, {})
    json_schema_str = ', '.join(f"'{key}': {value}" for key, value in json_schema.items())
    # Bug fix: the original adjacent f-strings joined "...based on analysis"
    # and "The JSON schema..." with no separator, garbling the prompt text.
    return (f"You are a data analyst API capable of {analysis_type} analysis. "
            f"{specific_instruction} Please respond with your analysis directly in JSON format "
            f"(without using Markdown code blocks or any other formatting). Always include confidence_score:number (0-1) with two decimals for result based on analysis. "
            f"The JSON schema should include: {{{json_schema_str}}}.")
83 |
+
|
84 |
+
|
85 |
+
|
86 |
+
|
87 |
+
# Maps each analysis-type key to the task instruction embedded in the system
# prompt by get_system_prompt(). Keys mirror the "AI Audit" sub-category
# values in ai_audit_analysis_categories; app.py also uses membership in
# this dict to decide whether an analysis type is implemented.
ANALYSIS_TYPES = {
    "sentiment_analysis": "Analyze the sentiment of the provided text. Determine whether the sentiment is positive, negative, or neutral and provide a confidence score.",
    "emotion_detection": "Detect and identify the primary emotions expressed in the provided text. Provide a score for the intensity of the detected emotion.",
    "political_bias_detection": "Detect any political bias in the provided text, identifying leaning towards particular ideologies or parties.",
    "stress_level_detection": "Analyze the text to assess stress levels, identifying triggers and intensity of stress.",
    "empathy_level_assessment": "Assess the level of empathy expressed in the text, identifying empathetic responses and tendencies.",
    "mood_detection": "Detect the mood of the individual based on textual cues, ranging from happy to sad, calm to angry.",
    "toxicity_detection": "Identify and assess the level of toxicity in the provided text. Determine whether the text contains harmful, offensive, or inappropriate content and provide a score indicating the severity of the toxicity"
}
96 |
+
|
97 |
+
|
98 |
+
# Human-readable JSON schema descriptions, one per analysis type. These are
# interpolated into the system prompt (see get_system_prompt) to tell the
# model which keys its JSON response must contain; they are descriptions
# for the model, not machine-validated schemas.
JSON_SCHEMAS = {
    "sentiment_analysis": {
        "sentiment": "string (positive, negative, neutral)",
        "confidence_score": "number (0-1)",
        "text_snippets": "array of strings (specific text portions contributing to sentiment)"
    },
    "emotion_detection": {
        "emotion": "string (primary emotion detected)",
        "confidence_score": "number (0-1)",
        "secondary_emotions": "array of objects (secondary emotions and their scores)"
    },
    "political_bias_detection": {
        "bias": "string (left, right, neutral)",
        "confidence_score": "number (0-1)",
        "bias_indicators": "array of strings (elements indicating bias)",
        "political_alignment_score": "number (quantifying degree of political bias)"
    },
    "stress_level_detection": {
        "stress_level": "string",
        "stress_triggers": "array of strings"
    },
    "empathy_level_assessment": {
        "empathy_level": "string",
        "empathetic_responses": "array of strings"
    },
    "mood_detection": {
        "mood": "string",
        "mood_intensity": "number"
    },
    "toxicity_detection": {
        "toxicity_level": "string (none, low, medium, high)",
        "toxicity_flags": "array of strings (specific words or phrases contributing to toxicity)",
        "contextual_factors": "array of objects (additional contextual elements influencing toxicity interpretation)"
    }
}
133 |
+
|
134 |
+
|
135 |
+
|
136 |
+
|