import gradio as gr
import os
import requests
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()
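
# An example .env layout for local development (illustrative placeholders only;
# the variable names match the os.getenv() calls below):
#
#   GPT4_API_KEY=<your-gpt-4-key>
#   GPT4O_API_KEY=<your-gpt-4o-key>
#   GPT35_TURBO_API_KEY=<your-gpt-35-turbo-key>
#   GPT4_32K_API_KEY=<your-gpt-4-32k-key>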

# Define endpoints and securely load API keys from environment variables
model_config = {
    "gpt-4": {
        "endpoint": "https://roger-m38jr9pd-eastus2.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2024-08-01-preview",
        "api_key": os.getenv("GPT4_API_KEY")
    },
    "gpt-4o": {
        "endpoint": "https://roger-m38jr9pd-eastus2.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2024-08-01-preview",
        "api_key": os.getenv("GPT4O_API_KEY")
    },
    "gpt-35-turbo": {
        "endpoint": "https://rogerkoranteng.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2024-08-01-preview",
        "api_key": os.getenv("GPT35_TURBO_API_KEY")
    },
    "gpt-4-32k": {
        "endpoint": "https://roger-m38orjxq-australiaeast.openai.azure.com/openai/deployments/gpt-4-32k/chat/completions?api-version=2024-08-01-preview",
        "api_key": os.getenv("GPT4_32K_API_KEY")
    }
}
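
# Optional startup sanity check (a minimal sketch, not required by the app):
# warn early if any API key failed to load from the environment, rather than
# erroring on the first request to that model.
for model_name, info in model_config.items():
    if not info["api_key"]:
        print(f"Warning: no API key found for '{model_name}'; requests to it will fail.")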


# Function to generate a response from Azure OpenAI
def generate_response(model_choice, user_message, chat_history):
    model_info = model_config.get(model_choice)

    if not model_info:
        return "Invalid model selection. Please choose a valid model.", chat_history

    chat_history.append({"role": "user", "content": user_message})

    headers = {
        "Content-Type": "application/json",
        "api-key": model_info['api_key'],
    }

    data = {
        "messages": chat_history,
        "max_tokens": 150,   # cap on reply length; raise for longer answers
        "temperature": 0.7,  # moderate creativity
    }

    try:
        response = requests.post(
            model_info["endpoint"],
            headers=headers,
            json=data,
            timeout=30  # avoid hanging indefinitely on a stalled connection
        )

        response.raise_for_status()
        response_data = response.json()

        assistant_message = response_data['choices'][0]['message']['content']
        chat_history.append({"role": "assistant", "content": assistant_message})

        return assistant_message, chat_history

    except requests.exceptions.RequestException as e:
        return f"Error: {e}", chat_history
    except (KeyError, IndexError, ValueError) as e:
        # The response body was missing the expected fields (e.g., no 'choices')
        return f"Error: unexpected response format ({e})", chat_history

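# Quick smoke test (illustrative only; not wired into the UI):
#   reply, history = generate_response("gpt-4o", "Hello!", [])
#   print(reply)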

# Function to format chat history with improved readability
def format_chat_history(history):
    formatted_history = ""
    for message in history:
        role = "User:" if message["role"] == "user" else "Sage:"
        formatted_history += f"{role} {message['content']}\n\n"  # Message content with a line break
    return formatted_history.strip()  # Remove any trailing whitespace
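
# Example (illustrative): format_chat_history([
#     {"role": "user", "content": "Hi"},
#     {"role": "assistant", "content": "Hello!"},
# ]) returns "User: Hi\n\nSage: Hello!"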


# List of available models
azure_models = [
    "gpt-4",
    "gpt-4o",
    "gpt-35-turbo",
    "gpt-4-32k"
]


# Function to handle model change status update
def change_model(model_choice):
    return f"Selected model: {model_choice}"


# Create the Gradio interface
with gr.Blocks() as interface:
    gr.Markdown("## Sage - Your Mental Health Advisor")

    with gr.Tab("Model Selection"):
        gr.Markdown("### Select Model for Chat")
        model_dropdown = gr.Dropdown(
            choices=azure_models,
            label="Choose a Model",
            value=azure_models[0],  # Default model
            interactive=True
        )

        # Add status update on model change
        status_textbox = gr.Textbox(label="Model Selection Status", value=f"Selected model: {azure_models[0]}", interactive=False)
        model_dropdown.change(change_model, inputs=model_dropdown, outputs=status_textbox)

        gr.Markdown("The selected model will be used for chat interaction.")

    with gr.Tab("Chat Interface"):
        gr.Markdown("### Chat with Sage - Your Mental Health Advisor")
        chat_history_state = gr.State([])  # Store chat history
        model_choice_state = gr.State(azure_models[0])  # Default model
        user_message = gr.Textbox(
            placeholder="Type your message to Sage here...",
            label="Your Message",
            lines=2,
            scale=7
        )

        model_dropdown.change(
            lambda x: x, inputs=model_dropdown, outputs=model_choice_state
        )

        assistant_response = gr.Textbox(label="Assistant's Response")
        submit_button = gr.Button("Send Message")

        submit_button.click(
            generate_response,
            inputs=[model_choice_state, user_message, chat_history_state],
            outputs=[assistant_response, chat_history_state]
        )

    # Chat History Tab
    with gr.Tab("Chat History"):
        gr.Markdown("### Chat History")

        # Add the fetch history button inside the "Chat History" tab
        fetch_history_button = gr.Button("Fetch Chat History")
        chat_history_display = gr.Textbox(label="Chat History", interactive=False, lines=10)


        # Bind the fetch history button to display the formatted chat history
        fetch_history_button.click(
            format_chat_history,
            inputs=chat_history_state,
            outputs=chat_history_display
        )

# Launch the Gradio app (outside the Blocks context, once the UI is fully defined)
interface.launch(server_name="0.0.0.0", server_port=8080, share=True)