import gradio as gr
import os
import json
import tempfile
from datetime import datetime

import numpy as np
import requests
import torch
from dotenv import load_dotenv
from gtts import gTTS
from huggingface_hub import login
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    Wav2Vec2ForCTC,
    Wav2Vec2Tokenizer,
    pipeline,
)

# Load environment variables from .env file and authenticate with Hugging Face
load_dotenv()
token = os.getenv("HF_TOKEN")
login(token=token)

# File paths for storing model configurations and chat history
MODEL_CONFIG_FILE = "model_config.json"
CHAT_HISTORY_FILE = "chat_history.json"


# Load model configurations from a JSON file (if it exists),
# falling back to the built-in Azure OpenAI deployments
def load_model_config():
    if os.path.exists(MODEL_CONFIG_FILE):
        with open(MODEL_CONFIG_FILE, 'r') as f:
            return json.load(f)
    return {
        "gpt-4": {
            "endpoint": "https://roger-m38jr9pd-eastus2.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2024-08-01-preview",
            "api_key": os.getenv("GPT4_API_KEY"),
            "model_path": None  # No model path for API models
        },
        "gpt-4o": {
            "endpoint": "https://roger-m38jr9pd-eastus2.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2024-08-01-preview",
            "api_key": os.getenv("GPT4O_API_KEY"),
            "model_path": None
        },
        "gpt-35-turbo": {
            "endpoint": "https://rogerkoranteng.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2024-08-01-preview",
            "api_key": os.getenv("GPT35_TURBO_API_KEY"),
            "model_path": None
        },
        "gpt-4-32k": {
            "endpoint": "https://roger-m38orjxq-australiaeast.openai.azure.com/openai/deployments/gpt-4-32k/chat/completions?api-version=2024-08-01-preview",
            "api_key": os.getenv("GPT4_32K_API_KEY"),
            "model_path": None
        }
    }


predefined_messages = {
    "feeling_sad": "Hello, I am feeling sad today, what should I do?",
    "Nobody likes me": "Hello, Sage. I feel like nobody likes me. What should I do?",
    "Boyfriend broke up": "Hi Sage, my boyfriend broke up with me. I'm feeling so sad. What should I do?",
    "I am lonely": "Hi Sage, I am feeling lonely. What should I do?",
    "I am stressed": "Hi Sage, I am feeling stressed. What should I do?",
    "I am anxious": "Hi Sage, I am feeling anxious. What should I do?",
}
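# For reference, a minimal sketch of the model_config.json layout that
# load_model_config / save_model_config read and write. The values below are
# illustrative only (the "distilgpt2" entry is a hypothetical local model):
#
# {
#     "gpt-4": {
#         "endpoint": "https://<resource>.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2024-08-01-preview",
#         "api_key": "<azure-api-key>",
#         "model_path": null
#     },
#     "distilgpt2": {
#         "endpoint": null,
#         "api_key": null,
#         "model_path": "./models/distilgpt2"
#     }
# }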
# Save model configuration to JSON
def save_model_config():
    with open(MODEL_CONFIG_FILE, 'w') as f:
        json.dump(model_config, f, indent=4)


# Load chat history from a JSON file
def load_chat_history():
    if os.path.exists(CHAT_HISTORY_FILE):
        with open(CHAT_HISTORY_FILE, 'r') as f:
            return json.load(f)
    return []


# Save chat history to a JSON file
def save_chat_history(chat_history):
    with open(CHAT_HISTORY_FILE, 'w') as f:
        json.dump(chat_history, f, indent=4)


# Define model configurations
model_config = load_model_config()


# Dynamically register a downloaded model in model_config
def add_downloaded_model(model_name, model_path):
    model_config[model_name] = {
        "endpoint": None,
        "model_path": model_path,
        "api_key": None
    }
    save_model_config()
    return list(model_config.keys())


# Download a model from Hugging Face synchronously and register it
def download_model(model_name):
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        model_path = f"./models/{model_name}"
        os.makedirs(model_path, exist_ok=True)
        model.save_pretrained(model_path)
        tokenizer.save_pretrained(model_path)
        updated_models = add_downloaded_model(model_name, model_path)
        return f"Model '{model_name}' downloaded and added.", updated_models
    except Exception as e:
        return f"Error downloading model '{model_name}': {e}", list(model_config.keys())


# Chat function using the selected model
def generate_response(model_choice, user_message, chat_history):
    model_info = model_config.get(model_choice)
    if not model_info:
        # Keep the (history, audio) return contract consistent even on errors
        chat_history.append({
            "role": "assistant",
            "content": "Invalid model selection. Please choose a valid model."
        })
        return chat_history, None

    chat_history.append({"role": "user", "content": user_message})
    headers = {"Content-Type": "application/json"}

    # API-backed models have an endpoint; local models have a model_path
    if model_info["endpoint"]:
        if model_info["api_key"]:
            headers["api-key"] = model_info["api_key"]
        data = {"messages": chat_history, "max_tokens": 1500, "temperature": 0.7}
        try:
            # Send the request to the API model endpoint
            response = requests.post(model_info["endpoint"], headers=headers, json=data)
            response.raise_for_status()
            assistant_message = response.json()['choices'][0]['message']['content']
        except requests.exceptions.RequestException as e:
            assistant_message = f"Error: {e}"
    else:
        # Local model: load the model and tokenizer from the saved path
        model_path = model_info["model_path"]
        try:
            tokenizer = AutoTokenizer.from_pretrained(model_path)
            model = AutoModelForCausalLM.from_pretrained(model_path)
            inputs = tokenizer(user_message, return_tensors="pt")
            outputs = model.generate(inputs['input_ids'], max_length=500, num_return_sequences=1)
            assistant_message = tokenizer.decode(outputs[0], skip_special_tokens=True)
        except Exception as e:
            assistant_message = f"Error loading model locally: {e}"

    chat_history.append({"role": "assistant", "content": assistant_message})
    save_chat_history(chat_history)  # Persist chat history to JSON

    # Convert the assistant message to audio
    tts = gTTS(assistant_message)
    audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    tts.save(audio_file.name)

    return chat_history, audio_file.name
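# Illustrative smoke test, not part of the original app flow: a minimal sketch
# of how download_model and generate_response compose. The "distilgpt2" model
# name and the SAGE_SMOKE_TEST environment-variable guard are assumptions
# introduced here for demonstration only.
if os.getenv("SAGE_SMOKE_TEST"):
    status, available_models = download_model("distilgpt2")
    print(status)
    # Start from an empty history; the local branch of generate_response runs
    # the saved model and returns the updated history plus an mp3 path.
    demo_history, demo_audio = generate_response(
        "distilgpt2", predefined_messages["feeling_sad"], []
    )
    print(demo_history[-1]["content"], "->", demo_audio)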
# Function to format chat history with custom bubble styles
def format_chat_bubble(history):
    formatted_history = ""
    for message in history:
        timestamp = datetime.now().strftime("%H:%M:%S")
        if message["role"] == "user":
            formatted_history += f'''