import json
import os
import random

import gradio as gr
import numpy as np
import pandas as pd
import torch
from deap import base, creator, tools, algorithms
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import OneHotEncoder
from transformers import BloomForCausalLM, BloomTokenizerFast

# Toy training data: short context sentences labelled with an emotion.
data = {
    'context': [
        'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
        'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
        'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
        'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
        'I am pessimistic', 'I feel bored', 'I am envious'
    ],
    'emotion': [
        'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
        'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
        'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
    ]
}
df = pd.DataFrame(data)

# One-hot encode the context strings; unknown contexts are ignored rather than raising.
encoder = OneHotEncoder(handle_unknown='ignore')
contexts_encoded = encoder.fit_transform(df[['context']]).toarray()

# Integer-code the emotion labels and keep the category names for decoding predictions.
emotions_target = df['emotion'].astype('category').cat.codes
emotion_classes = df['emotion'].astype('category').cat.categories

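# Illustrative note (standard sklearn behaviour, not part of the original flow):
# with handle_unknown='ignore', a context seen during training maps to a one-hot
# row, while any unseen context maps to an all-zero row, e.g.
#   encoder.transform([['I am happy']]).toarray()      # one-hot row
#   encoder.transform([['something new']]).toarray()   # all zeros
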
# Train a small MLP classifier that maps encoded contexts to emotion codes.
X_train, X_test, y_train, y_test = train_test_split(
    contexts_encoded, emotions_target, test_size=0.2, random_state=42
)
model = MLPClassifier(hidden_layer_sizes=(10, 10), max_iter=1000, random_state=42)
model.fit(X_train, y_train)

# Fit an Isolation Forest on the classifier's predictions so unusual predictions
# can be flagged as anomalies later.
historical_data = np.asarray(model.predict(contexts_encoded)).reshape(-1, 1)
isolation_forest = IsolationForest(contamination=0.1, random_state=42)
isolation_forest.fit(historical_data)

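# Optional sanity check (assumption: only useful during development, not required):
# print("Held-out accuracy:", model.score(X_test, y_test))
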
# Emotion state table: each entry holds a share of a fixed percentage budget and a
# rough motivational label; 'ideal_state' acts as the balanced reservoir.
emotions = {
    'joy': {'percentage': 10, 'motivation': 'positive'},
    'pleasure': {'percentage': 10, 'motivation': 'selfish'},
    'sadness': {'percentage': 10, 'motivation': 'negative'},
    'grief': {'percentage': 10, 'motivation': 'negative'},
    'anger': {'percentage': 10, 'motivation': 'traumatic or strong'},
    'calmness': {'percentage': 10, 'motivation': 'neutral'},
    'determination': {'percentage': 10, 'motivation': 'positive'},
    'resentment': {'percentage': 10, 'motivation': 'negative'},
    'glory': {'percentage': 10, 'motivation': 'positive'},
    'motivation': {'percentage': 10, 'motivation': 'positive'},
    'ideal_state': {'percentage': 100, 'motivation': 'balanced'},
    'fear': {'percentage': 10, 'motivation': 'defensive'},
    'surprise': {'percentage': 10, 'motivation': 'unexpected'},
    'anticipation': {'percentage': 10, 'motivation': 'predictive'},
    'trust': {'percentage': 10, 'motivation': 'reliable'},
    'disgust': {'percentage': 10, 'motivation': 'repulsive'},
    'optimism': {'percentage': 10, 'motivation': 'hopeful'},
    'pessimism': {'percentage': 10, 'motivation': 'doubtful'},
    'boredom': {'percentage': 10, 'motivation': 'indifferent'},
    'envy': {'percentage': 10, 'motivation': 'jealous'}
}

# Spread the total percentage budget evenly across all entries at start-up.
total_percentage = 200
default_percentage = total_percentage / len(emotions)
for emotion in emotions:
    emotions[emotion]['percentage'] = default_percentage

emotion_history_file = 'emotion_history.json'

def load_historical_data(file_path=emotion_history_file):
    """Load the saved emotion history, or return an empty list if none exists."""
    if os.path.exists(file_path):
        with open(file_path, 'r') as file:
            return json.load(file)
    return []


def save_historical_data(historical_data, file_path=emotion_history_file):
    """Persist the emotion history to disk as JSON."""
    with open(file_path, 'w') as file:
        json.dump(historical_data, file)


emotion_history = load_historical_data()

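# The history file holds one snapshot per interaction: a list of dicts mapping
# each emotion name to its percentage at that point, e.g.
#   [{"joy": 12.5, "sadness": 10.0, ...}, ...]
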
def update_emotion(emotion, percentage):
    """Shift percentage mass from the balanced 'ideal_state' to a specific emotion."""
    emotions['ideal_state']['percentage'] -= percentage
    emotions[emotion]['percentage'] += percentage

    # Re-balance so the grand total stays at total_percentage.
    total_current = sum(e['percentage'] for e in emotions.values())
    adjustment = total_percentage - total_current
    emotions['ideal_state']['percentage'] += adjustment

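# Example: update_emotion('joy', 20) moves 20 points from 'ideal_state' to 'joy';
# the re-balancing step then keeps the grand total at total_percentage (200).
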
def normalize_context(context):
    """Lower-case and strip the input so matching is case- and whitespace-insensitive."""
    return context.lower().strip()

def evolve_emotions():
    """Rebalance the emotion percentages with a genetic algorithm (DEAP).

    Each individual carries one gene per non-ideal emotion plus a final gene for
    'ideal_state'. Fitness minimises the distance of 'ideal_state' from 100 and
    the total mass of the remaining emotions.
    """
    def evaluate(individual):
        ideal_state = individual[-1]
        other_emotions = individual[:-1]
        return abs(ideal_state - 100), sum(other_emotions)

    # creator classes are registered globally, so only create them once.
    if not hasattr(creator, "FitnessMin"):
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
    if not hasattr(creator, "Individual"):
        creator.create("Individual", list, fitness=creator.FitnessMin)

    num_other_emotions = len(emotions) - 1  # every entry except 'ideal_state'

    toolbox = base.Toolbox()
    toolbox.register("attribute", random.uniform, 0, 20)
    toolbox.register("ideal_state_attr", random.uniform, 80, 120)
    # An individual is the non-ideal genes followed by the ideal_state gene.
    toolbox.register(
        "individual",
        tools.initIterate,
        creator.Individual,
        lambda: [toolbox.attribute() for _ in range(num_other_emotions)]
                + [toolbox.ideal_state_attr()],
    )
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", evaluate)
    toolbox.register("mate", tools.cxBlend, alpha=0.5)
    toolbox.register("mutate", tools.mutGaussian, mu=10, sigma=5, indpb=0.3)
    toolbox.register("select", tools.selTournament, tournsize=3)

    population = toolbox.population(n=10)
    population, _log = algorithms.eaSimple(
        population, toolbox, cxpb=0.5, mutpb=0.2, ngen=20, verbose=False
    )

    # Write the best individual's genes back into the emotion table, keeping
    # 'ideal_state' mapped to the final gene.
    best_individual = tools.selBest(population, k=1)[0]
    other_keys = [key for key in emotions if key != 'ideal_state']
    for idx, emotion in enumerate(other_keys):
        emotions[emotion]['percentage'] = best_individual[idx]
    emotions['ideal_state']['percentage'] = best_individual[-1]

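# A minimal sketch (assumption: the idle evolution should run periodically in the
# background; this is not part of the original flow):
#
#   import threading, time
#   def _idle_loop(interval_seconds=300):
#       while True:
#           print(handle_idle_state())
#           time.sleep(interval_seconds)
#   threading.Thread(target=_idle_loop, daemon=True).start()
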
def get_emotional_response(context):
    """Classify the context, update the emotion state, and return a formatted report."""
    context = normalize_context(context)

    # Encode and classify the context. Unseen contexts encode to all zeros, so the
    # prediction is only meaningful for inputs close to the training data.
    context_encoded = encoder.transform([[context]]).toarray()
    prediction = model.predict(context_encoded)
    predicted_emotion = emotion_classes[prediction[0]]

    # Flag anomalous predictions with the Isolation Forest and respond with calmness.
    anomaly_score = isolation_forest.decision_function(prediction.reshape(1, -1))[0]
    if anomaly_score < -0.5:
        print("Anomalous context detected. Adjusting emotional response.")
        update_emotion('calmness', 20)
    else:
        if predicted_emotion == 'joy':
            update_emotion('joy', 20)
            update_emotion('pleasure', 20)
        elif predicted_emotion == 'sadness':
            update_emotion('sadness', 20)
            update_emotion('grief', 20)
        elif predicted_emotion in (
            'anger', 'determination', 'resentment', 'glory', 'motivation',
            'surprise', 'fear', 'trust', 'disgust', 'optimism', 'pessimism',
            'boredom', 'envy'
        ):
            update_emotion(predicted_emotion, 20)
        else:
            update_emotion('calmness', 20)

    # Record the new state and persist the full history.
    emotion_state = {emotion: data['percentage'] for emotion, data in emotions.items()}
    emotion_history.append(emotion_state)
    save_historical_data(emotion_history)

    response = ""
    for emotion, data in emotions.items():
        response += f"{emotion.capitalize()}: {data['percentage']:.2f}% ({data['motivation']} motivation)\n"

    return response

def handle_idle_state():
    """Evolve the emotion state while idle and report the new percentages."""
    evolve_emotions()
    response = "Emotions evolved\n"
    for emotion, data in emotions.items():
        response += f"{emotion.capitalize()}: {data['percentage']:.2f}% ({data['motivation']} motivation)\n"
    return response

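# Each report line produced above has the form:
#   "Joy: 12.34% (positive motivation)"
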
class SOUL:
    """Wraps a BLOOM causal language model and bridges it to the emotion engine."""

    def __init__(self, model_name='bigscience/bloom-1b1'):
        self.tokenizer = BloomTokenizerFast.from_pretrained(model_name)
        self.model = BloomForCausalLM.from_pretrained(model_name)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)

    def generate_text(self, prompt, max_length=100):
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)

        with torch.no_grad():
            generate_ids = self.model.generate(
                inputs.input_ids,
                attention_mask=inputs.attention_mask,
                max_length=max_length,
                num_return_sequences=1,
                no_repeat_ngram_size=2,
                do_sample=True,
                top_k=50,
                top_p=0.95,
                temperature=0.7
            )

        return self.tokenizer.batch_decode(
            generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]

    def bridge_ai(self, prompt):
        # Generate a BLOOM continuation, then run it through the emotion engine.
        bloom_response = self.generate_text(prompt)
        emotional_response = get_emotional_response(bloom_response)
        return bloom_response, emotional_response

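# Note: instantiating SOUL() downloads the bigscience/bloom-1b1 checkpoint
# (roughly a billion parameters) from the Hugging Face Hub on first run and moves
# it to the GPU when one is available.
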
soul = SOUL()


def interact_with_soul(user_input):
    bloom_response, emotional_response = soul.bridge_ai(user_input)
    return bloom_response, emotional_response


iface = gr.Interface(
    fn=interact_with_soul,
    inputs="text",
    outputs=["text", "text"],
    title="S.O.U.L AI",
    description="Enter a prompt to interact with the S.O.U.L AI, which will generate a response and provide an emotional analysis."
)

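# launch() serves the interface locally; if a public link is needed outside a
# hosted Space, Gradio also supports iface.launch(share=True).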
iface.launch()