# AI_app/app.py
# Importing libraries and utilities
import psutil
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
import pandas as pd
import torch
import gradio as gr
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
import seaborn as sns
import random
# Load the emotion-detection model (T5 fine-tuned for emotion classification)
emotion_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
emotion_model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-emotion")
# Load the quote-generation model (GPT-2)
text_tokenizer = AutoTokenizer.from_pretrained("gpt2")
text_model = AutoModelForCausalLM.from_pretrained("gpt2")
device = torch.device('cpu')
emotion_model.to(device)
text_model.to(device)
# Hardcoded fallback quotes for each emotion
fallback_quotes = {
"sadness": ["Keep going, tough times don't last.", "This too shall pass."],
"joy": ["Happiness is here to stay.", "Enjoy this moment."],
"love": ["Love is a beautiful journey.", "Cherish every moment of love."],
"anger": ["Take a deep breath, you'll get through this.", "Channel your anger into something positive."],
"fear": ["Courage is not the absence of fear.", "Face your fears, and they will disappear."],
"surprise": ["Embrace the unexpected.", "Life is full of pleasant surprises."]
}
# Function to detect emotion from the user's input text
def get_emotion(text):
    input_ids = emotion_tokenizer.encode(text + '</s>', return_tensors='pt').to(device)
    output = emotion_model.generate(input_ids=input_ids, max_length=2)
    emotion = emotion_tokenizer.decode(output[0], skip_special_tokens=True).strip()
    return emotion
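# Example (illustrative; the label comes from the T5 emotion model, so output may vary):
#   get_emotion("I finally finished my project today!")  # -> e.g. "joy"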
# Function to generate a motivational quote based on the detected emotion
def generate_quote(original_text, emotion):
    input_text = f"Generate an inspiring quote based on the following emotion:\nEmotion: {emotion}\nText: {original_text}\nQuote:"
    inputs = text_tokenizer(input_text, return_tensors="pt").to(device)
    try:
        outputs = text_model.generate(
            **inputs,
            max_length=100,
            do_sample=True,
            top_k=50,      # Randomness control
            top_p=0.95,    # Nucleus sampling for better diversity
            temperature=0.7,
            pad_token_id=text_tokenizer.eos_token_id  # GPT-2 has no pad token; reuse EOS
        )
        generated_text = text_tokenizer.decode(outputs[0], skip_special_tokens=True)
        if "Quote:" in generated_text:
            quote = generated_text.split("Quote:")[1].strip().split("\n")[0]
        else:
            quote = generated_text.strip()
        return quote
    except Exception:
        # Fall back to a hardcoded quote if generation fails
        return random.choice(fallback_quotes.get(emotion, ["Stay positive.", "Keep pushing forward."]))
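# Example (illustrative; GPT-2 sampling is non-deterministic, so the quote varies per call):
#   generate_quote("I feel stuck at work lately.", "sadness")
#   # -> a sampled quote string, or a fallback such as "This too shall pass." on error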
# Load a CSV of journal entries and tag each 'Answer' with its detected emotion
def load_and_process_data(file_path):
    df = pd.read_csv(file_path)
    if 'Answer' in df.columns:
        df = df[['Answer']].copy()
        df['Emotion'] = df['Answer'].apply(get_emotion)
    else:
        raise ValueError("The expected column 'Answer' is not present in the dataset.")
    return df
# Function to generate synthetic journal data for testing
def generate_synthetic_data(num_entries=100):
    emotions = ["sadness", "joy", "love", "anger", "fear", "surprise"]
    entries = []
    for _ in range(num_entries):
        emotion = random.choice(emotions)
        text = f"This is a synthetic entry expressing {emotion}."
        entries.append({"Answer": text, "Emotion": emotion})
    return pd.DataFrame(entries)
# Function to process data in chunks (for parallel processing)
def process_chunk(chunk):
    chunk = chunk.copy()
    chunk['Inspirational Quote'] = chunk.apply(lambda row: generate_quote(row['Answer'], row['Emotion']), axis=1)
    return chunk
def process_in_chunks(df, chunk_size=10):
    results = Parallel(n_jobs=-1, backend="multiprocessing")(
        delayed(process_chunk)(df[start:start + chunk_size]) for start in range(0, len(df), chunk_size)
    )
    return pd.concat(results, ignore_index=True)
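# Quick sanity check (a minimal sketch; runs both models over synthetic entries,
# so it is slow on CPU; uncomment to try):
#   sample_df = generate_synthetic_data(num_entries=20)
#   processed = process_in_chunks(sample_df, chunk_size=5)
#   print(processed[['Emotion', 'Inspirational Quote']].head())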
# Function to plot the emotion distribution graph
def plot_emotion_distribution(df):
    emotion_count = df['Emotion'].value_counts()
    plt.figure(figsize=(10, 6))
    sns.barplot(x=emotion_count.index, y=emotion_count.values, palette='viridis')
    plt.title('Count of Each Emotion')
    plt.xlabel('Emotion')
    plt.ylabel('Count')
    plt.show()
# Function to detect emotion and generate a quote for a journal entry
def journal_interface(Diary):
    emotion = get_emotion(Diary)
    quote = generate_quote(Diary, emotion)
    return emotion, quote
# Function to track CPU utilization during processing
def get_cpu_utilization():
    return f"Current CPU Usage: {psutil.cpu_percent()}%"
# Main function to load the data, process it in chunks, and generate a graph
def main(file_path):
    df = load_and_process_data(file_path)
    print(file_path)
    processed_df = process_in_chunks(df)
    plot_emotion_distribution(processed_df)
    print(get_cpu_utilization())
    return processed_df
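# Example (illustrative; "journal.csv" is a placeholder path for a CSV with an 'Answer' column):
#   results_df = main("journal.csv")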
# Gradio interface for inputting thoughts and generating emotion/quote
interface = gr.Interface(
    fn=journal_interface,
    inputs=gr.Textbox(lines=5, placeholder="Enter your thoughts here..."),
    outputs=[
        gr.Textbox(label="Detected Emotion"),
        gr.Textbox(label="Generated Quote")
    ],
    title="AI-Powered Personal Journal",
    description="Enter your thoughts, and the AI will detect the emotion and generate an inspirational quote based on it.",
    theme=gr.themes.Soft()
)
# Helper intended for a "generate graph" button: runs the full pipeline and reports CPU usage
# (note: this function is defined here but not wired into the Gradio interface above)
def graph_button(file_path):
    processed_df = main(file_path)
    return get_cpu_utilization()
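# A minimal sketch of how graph_button could be wired into the UI (an assumption, not part of
# the original app; gr.File with type="filepath" passes the uploaded CSV's path as a string):
#   with gr.Blocks() as batch_ui:
#       file_input = gr.File(label="Upload journal CSV", type="filepath")
#       cpu_output = gr.Textbox(label="CPU Usage")
#       gr.Button("Process file").click(fn=graph_button, inputs=file_input, outputs=cpu_output)
#       batch_ui.launch()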
interface.launch(share=True)