mona's implementation
- app.py +116 -97
- requirements.txt +5 -4
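Summary: this commit replaces the gated google/gemma-2-2b-it quote generator (and the hard-coded API key it required) with gpt2, adds per-emotion fallback quotes, and introduces an offline CSV pipeline: chunked parallel quote generation via joblib, an emotion-distribution plot, and CPU-usage tracking via psutil.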
app.py
CHANGED
@@ -1,124 +1,143 @@
+# importing libraries and utilities
+import psutil
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
+import pandas as pd
 import torch
 import gradio as gr
-import
-
-# Intentionally hidden when deployed on HuggingFace Space
-API_Key = "censored-for-security-reason"
+from joblib import Parallel, delayed
+import matplotlib.pyplot as plt
+import seaborn as sns
+import random

-# Load emotion
+# Load emotion detection model
 emotion_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
 emotion_model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-emotion")

-text_tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it", token=token)
-text_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it", token=token)
+# Load quote generation model
+text_tokenizer = AutoTokenizer.from_pretrained("gpt2")
+text_model = AutoModelForCausalLM.from_pretrained("gpt2")

-# Set device to CUDA
 device = torch.device('cpu')
 emotion_model.to(device)
 text_model.to(device)

+# Hardcoded fallback quotes for each emotion
+fallback_quotes = {
+    "sadness": ["Keep going, tough times don't last.", "This too shall pass."],
+    "joy": ["Happiness is here to stay.", "Enjoy this moment."],
+    "love": ["Love is a beautiful journey.", "Cherish every moment of love."],
+    "anger": ["Take a deep breath, you'll get through this.", "Channel your anger into something positive."],
+    "fear": ["Courage is not the absence of fear.", "Face your fears, and they will disappear."],
+    "surprise": ["Embrace the unexpected.", "Life is full of pleasant surprises."]
+}

-# Function to predict emotion
+# Function to detect emotion from the user's input text
 def get_emotion(text):
     input_ids = emotion_tokenizer.encode(text + '</s>', return_tensors='pt').to(device)
     output = emotion_model.generate(input_ids=input_ids, max_length=2)
+    emotion = emotion_tokenizer.decode(output[0], skip_special_tokens=True).strip()
+    return emotion
+
+# Function to generate a motivational quote based on the detected emotion
+def generate_quote(original_text, emotion):
+    input_text = f"Generate an inspiring quote based on the following emotion:\nEmotion: {emotion}\nText: {original_text}\nQuote:"
+    input_ids = text_tokenizer(input_text, return_tensors="pt").to(device)
+
+    try:
+        outputs = text_model.generate(
+            **input_ids,
+            max_length=100,
+            do_sample=True,
+            top_k=50,        # randomness control
+            top_p=0.95,      # nucleus sampling for better diversity
+            temperature=0.7
+        )
+        generated_text = text_tokenizer.decode(outputs[0], skip_special_tokens=True)
+        if "Quote:" in generated_text:
+            quote = generated_text.split("Quote:")[1].strip().split("\n")[0]
+        else:
+            quote = generated_text.strip()
+        return quote
+    except Exception:
+        return random.choice(fallback_quotes.get(emotion, ["Stay positive.", "Keep pushing forward."]))
+
+# For file input
+def load_and_process_data(file_path):
+    df = pd.read_csv(file_path)
+    if 'Answer' in df.columns:
+        df = df[['Answer']]
+        df['Emotion'] = df['Answer'].apply(get_emotion)
     else:
+        raise ValueError("The expected column 'Answer' is not present in the dataset.")
+    return df
+
+# Function to generate synthetic journal data for testing
+def generate_synthetic_data(num_entries=100):
+    emotions = ["sadness", "joy", "love", "anger", "fear", "surprise"]
+    entries = []
+    for _ in range(num_entries):
+        emotion = random.choice(emotions)
+        text = f"This is a synthetic entry expressing {emotion}."
+        entries.append({"Answer": text, "Emotion": emotion})
+    return pd.DataFrame(entries)
+
+# Function to process data in chunks (for parallel processing)
+def process_chunk(chunk):
+    chunk['Inspirational Quote'] = chunk.apply(lambda row: generate_quote(row['Answer'], row['Emotion']), axis=1)
+    return chunk
+
+def process_in_chunks(df, chunk_size=10):
+    results = Parallel(n_jobs=-1, backend="multiprocessing")(
+        delayed(process_chunk)(df[start:start + chunk_size]) for start in range(0, len(df), chunk_size)
+    )
+    return pd.concat(results, ignore_index=True)
+
+# Function to plot the emotion distribution graph
+def plot_emotion_distribution(df):
+    emotion_count = df['Emotion'].value_counts()
+    plt.figure(figsize=(10, 6))
+    sns.barplot(x=emotion_count.index, y=emotion_count.values, palette='viridis')
+    plt.title('Count of Each Emotion')
+    plt.xlabel('Emotion')
+    plt.ylabel('Count')
+    plt.show()
+
+# Function to detect emotion and generate a quote for a journal entry
 def journal_interface(Diary):
-    except Exception as e:
-        # Return the error message to the interface for debugging
-        print(f"Error encountered: {str(e)}")
-        return f"Error: {str(e)}", "", ""  # Last output is for the image
+    emotion = get_emotion(Diary)
+    quote = generate_quote(Diary, emotion)
+    return emotion, quote

-# Create the Gradio interface
+# Function to track CPU utilization during processing
+def get_cpu_utilization():
+    return f"Current CPU Usage: {psutil.cpu_percent()}%"
+
+# Main function to load the data, process it in chunks, and generate a graph
+def main(file_path):
+    df = load_and_process_data(file_path)
+    print(file_path)
+    processed_df = process_in_chunks(df)
+    plot_emotion_distribution(processed_df)
+    print(get_cpu_utilization())
+    return processed_df
+
+# Gradio interface for inputting thoughts and generating emotion/quote
 interface = gr.Interface(
     fn=journal_interface,
     inputs=gr.Textbox(lines=5, placeholder="Enter your thoughts here..."),
     outputs=[
         gr.Textbox(label="Detected Emotion"),
-        gr.Textbox(label="Generated Quote 1"),
-        gr.Textbox(label="Generated Quote 2"),
-        #gr.Image(label="Emotion Image", visible=True, width="100%")  # Show image based on emotion
+        gr.Textbox(label="Generated Quote")
     ],
     title="AI-Powered Personal Journal",
-    description="Enter your thoughts, and the AI will detect the emotion and generate
-    theme=gr.themes.Soft()
+    description="Enter your thoughts, and the AI will detect the emotion and generate an inspirational quote based on it.",
+    theme=gr.themes.Soft()
 )

+# Button handler: run the CSV pipeline and report CPU usage
+def graph_button(file_path):
+    processed_df = main(file_path)
+    return get_cpu_utilization()

-interface.launch(share=True)
+interface.launch(share=True)
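Note that the batch pipeline added above (generate_synthetic_data → process_in_chunks → plot_emotion_distribution) is never invoked by the Gradio app itself. A minimal smoke-test sketch, assuming interface.launch(share=True) in app.py is moved behind an `if __name__ == "__main__":` guard so the module can be imported without launching the UI (the script name and entry count are illustrative):

# smoke_test.py — hypothetical usage, not part of this commit
from app import generate_synthetic_data, process_in_chunks, plot_emotion_distribution

if __name__ == "__main__":  # also required by the multiprocessing backend on spawn platforms
    df = generate_synthetic_data(num_entries=20)      # labeled synthetic journal entries
    processed = process_in_chunks(df, chunk_size=5)   # parallel quote generation via joblib
    print(processed[["Answer", "Emotion", "Inspirational Quote"]].head())
    plot_emotion_distribution(processed)              # bar chart of emotion counts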
requirements.txt
CHANGED
@@ -1,6 +1,7 @@
+psutil
+pandas
 torch
 gradio
+joblib
+matplotlib
+seaborn
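Two loose ends in this commit: the new requirements.txt does not list transformers even though app.py imports it, and graph_button is defined but never attached to a UI element, so the CSV pipeline is unreachable from the deployed Space. A sketch of one possible wiring using Gradio Blocks, meant to replace the gr.Interface block inside app.py (component names here are illustrative, not part of the commit):

# Hypothetical Blocks layout inside app.py; journal_interface and graph_button are defined above
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    diary = gr.Textbox(lines=5, placeholder="Enter your thoughts here...")
    emotion_out = gr.Textbox(label="Detected Emotion")
    quote_out = gr.Textbox(label="Generated Quote")
    gr.Button("Analyze").click(journal_interface, inputs=diary, outputs=[emotion_out, quote_out])

    csv_in = gr.File(label="Journal CSV with an 'Answer' column", type="filepath")
    cpu_out = gr.Textbox(label="CPU Usage")
    gr.Button("Process CSV").click(graph_button, inputs=csv_in, outputs=cpu_out)

demo.launch(share=True)

Since plot_emotion_distribution calls plt.show(), which does not render inside a Space, returning the matplotlib figure and displaying it in a gr.Plot component would make the chart visible in the browser.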