"""Sage - a mental-health advisor chat app built with Gradio on Azure OpenAI deployments."""
import gradio as gr
import os
import requests
from dotenv import load_dotenv

# Load environment variables (API keys) from a local .env file, if present.
load_dotenv()
# Define endpoints and securely load API keys from environment variables | |
model_config = { | |
"gpt-4": { | |
"endpoint": "https://roger-m38jr9pd-eastus2.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2024-08-01-preview", | |
"api_key": os.getenv("GPT4_API_KEY") | |
}, | |
"gpt-4o": { | |
"endpoint": "https://roger-m38jr9pd-eastus2.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2024-08-01-preview", | |
"api_key": os.getenv("GPT4O_API_KEY") | |
}, | |
"gpt-35-turbo": { | |
"endpoint": "https://rogerkoranteng.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2024-08-01-preview", | |
"api_key": os.getenv("GPT35_TURBO_API_KEY") | |
}, | |
"gpt-4-32k": { | |
"endpoint": "https://roger-m38orjxq-australiaeast.openai.azure.com/openai/deployments/gpt-4-32k/chat/completions?api-version=2024-08-01-preview", | |
"api_key": os.getenv("GPT4_32K_API_KEY") | |
} | |
} | |
# Function to generate a response from Azure OpenAI
def generate_response(model_choice, user_message, chat_history):
    """Send the running conversation to the selected Azure OpenAI deployment.

    Args:
        model_choice: Key into ``model_config`` naming the deployment to use.
        user_message: The user's new message text.
        chat_history: Mutable list of ``{"role", "content"}`` dicts; the user
            turn (and, on success, the assistant turn) is appended in place.

    Returns:
        Tuple of (assistant reply or error string, updated chat_history).
    """
    model_info = model_config.get(model_choice)
    if not model_info:
        return "Invalid model selection. Please choose a valid model.", chat_history

    # Record the user turn first so the API sees the full conversation.
    chat_history.append({"role": "user", "content": user_message})

    headers = {
        "Content-Type": "application/json",
        "api-key": model_info["api_key"],
    }
    data = {
        "messages": chat_history,
        "max_tokens": 150,
        "temperature": 0.7,
    }

    try:
        # timeout added so a stalled endpoint cannot hang the UI forever;
        # requests.Timeout is a RequestException, so the handler below covers it.
        response = requests.post(
            model_info["endpoint"],
            headers=headers,
            json=data,
            timeout=30,
        )
        response.raise_for_status()
        response_data = response.json()
        assistant_message = response_data["choices"][0]["message"]["content"]
        chat_history.append({"role": "assistant", "content": assistant_message})
        return assistant_message, chat_history
    except requests.exceptions.RequestException as e:
        # Surface network/HTTP failures to the UI instead of crashing the app.
        return f"Error: {e}", chat_history
# Function to format chat history with improved readability
def format_chat_history(history):
    """Render a chat history list as a readable transcript.

    Each ``{"role", "content"}`` entry becomes a "User:"/"Sage:" line; turns
    are separated by a blank line and trailing whitespace is stripped.
    """
    # Build with join instead of repeated += (avoids quadratic string growth);
    # the final strip() matches the original trailing-whitespace behavior.
    parts = [
        f"{'User:' if message['role'] == 'user' else 'Sage:'} {message['content']}\n\n"
        for message in history
    ]
    return "".join(parts).strip()
# List of available models, offered in the model-selection dropdown.
# Each entry must be a key of ``model_config``.
azure_models = [
    "gpt-4",
    "gpt-4o",
    "gpt-35-turbo",
    "gpt-4-32k",
]
# Function to handle model change status update
def change_model(model_choice):
    """Return the status-bar text shown when a model is selected."""
    return f"Selected model: {model_choice}"
# Create the Gradio interface: three tabs (model selection, chat, history)
# sharing chat/model state via gr.State components.
with gr.Blocks() as interface:
    gr.Markdown("## Sage - Your Mental Health Advisor")

    with gr.Tab("Model Selection"):
        gr.Markdown("### Select Model for Chat")
        model_dropdown = gr.Dropdown(
            choices=azure_models,
            label="Choose a Model",
            value=azure_models[0],  # Default model
            interactive=True,
        )
        # Show a status line that tracks the dropdown selection.
        status_textbox = gr.Textbox(
            label="Model Selection Status",
            value="Selected model: gpt-4",
            interactive=False,
        )
        model_dropdown.change(change_model, inputs=model_dropdown, outputs=status_textbox)
        gr.Markdown("The selected model will be used for chat interaction.")

    with gr.Tab("Chat Interface"):
        gr.Markdown("### Chat with Sage - Your Mental Health Advisor")
        chat_history_state = gr.State([])  # Store chat history across turns
        model_choice_state = gr.State(azure_models[0])  # Default model
        user_message = gr.Textbox(
            placeholder="Hello, I am Sage. How can I assist you today?",
            label="Your Message",
            lines=2,
            scale=7,
        )
        # Mirror the dropdown value into state so the chat tab always uses
        # the currently selected model.
        model_dropdown.change(
            lambda x: x, inputs=model_dropdown, outputs=model_choice_state
        )
        assistant_response = gr.Textbox(label="Assistant's Response")
        submit_button = gr.Button("Send Message")
        submit_button.click(
            generate_response,
            inputs=[model_choice_state, user_message, chat_history_state],
            outputs=[assistant_response, chat_history_state],
        )

    # Chat History Tab
    with gr.Tab("Chat History"):
        gr.Markdown("### Chat History")
        fetch_history_button = gr.Button("Fetch Chat History")
        chat_history_display = gr.Textbox(label="Chat History", interactive=False, lines=10)

        # Fetch and display the accumulated chat history as a transcript.
        def fetch_chat_history(chat_history):
            return format_chat_history(chat_history)

        fetch_history_button.click(
            fetch_chat_history,
            inputs=chat_history_state,
            outputs=chat_history_display,
        )

# Launch the Gradio app, listening on all interfaces on port 8080.
# NOTE(review): share=True also requests a public gradio.live tunnel —
# confirm that is intended for this deployment.
interface.launch(server_name="0.0.0.0", server_port=8080, share=True)