Demosthene-OR committed · Commit 865221f · 1 parent: 6be619f
Commit message: ....
Files changed:
- app.py (+0 -2)
- tabs/chatbot_tab.py (+2 -5)
app.py CHANGED

@@ -116,7 +116,6 @@ def run():
     if (llm_choice == "OpenAI 3.5") : st.session_state.model = "gpt-3.5-turbo"
     elif (llm_choice == "OpenAI 4o") : st.session_state.model = "gpt-4o"
     else: st.session_state.model = "mistral-large-latest"
-
     if (llm_choice in ["OpenAI 3.5","OpenAI 4o"]) and ('OPENAI_API_KEY' not in st.session_state):
         # Set OpenAI API key
         st.sidebar.subheader("OpenAI API Key")
@@ -126,7 +125,6 @@ def run():
             st.session_state['OPENAI_API_KEY'] = openai_api_key
             st.sidebar.success("OpenAI API Key set successfully.")

-    st.session_state.model = "mistral-large-latest"
     with st.sidebar:
         l = st.selectbox("langue:",lang_tgt, format_func = find_lang_label, key="Language", label_visibility="hidden")
         st.session_state.language_label = find_lang_label_en(l)
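The two removals matter because app.py previously forced st.session_state.model back to "mistral-large-latest" right after the OpenAI key was stored, so choosing an OpenAI model in the sidebar never stuck. Below is a minimal sketch of the corrected selection flow, assuming the surrounding sidebar widgets; the select_model wrapper and the text_input label are illustrative, not the app's exact code.

# Sketch of the corrected sidebar flow (assumed surroundings: the real app
# defines llm_choice, lang_tgt, find_lang_label and find_lang_label_en elsewhere).
import streamlit as st

def select_model(llm_choice: str) -> None:
    # Map the sidebar choice to a concrete model name.
    if llm_choice == "OpenAI 3.5":
        st.session_state.model = "gpt-3.5-turbo"
    elif llm_choice == "OpenAI 4o":
        st.session_state.model = "gpt-4o"
    else:
        st.session_state.model = "mistral-large-latest"

    # Ask for an OpenAI key only when an OpenAI model is selected
    # and no key is stored yet.
    if llm_choice in ["OpenAI 3.5", "OpenAI 4o"] and "OPENAI_API_KEY" not in st.session_state:
        st.sidebar.subheader("OpenAI API Key")
        openai_api_key = st.sidebar.text_input("Enter your OpenAI API key", type="password")
        if openai_api_key:
            st.session_state["OPENAI_API_KEY"] = openai_api_key
            st.sidebar.success("OpenAI API Key set successfully.")
    # The removed line that reset st.session_state.model to "mistral-large-latest"
    # at this point would have silently overridden the OpenAI choice.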
tabs/chatbot_tab.py CHANGED

@@ -286,7 +286,7 @@ def play_audio(custom_sentence, Lang_target, speed=1.0):


 def run():
-    global thread_id, config, model_speech, language,prompt,model
+    global thread_id, config, model_speech, language,prompt,model, model_name

     st.write("")
     st.write("")
@@ -308,10 +308,8 @@ def run():
         model = ChatOpenAI(model=st.session_state.model,
                            temperature=0.8, # Adjust creativity level
                            max_tokens=150 # Define max output token limit
-                          )
-
+                           )
     else:
-        st.session_state.model = "mistral-large-latest"
         model = ChatMistralAI(model=st.session_state.model)


@@ -420,7 +418,6 @@ def run():
             with st.chat_message(message["role"]):
                 st.markdown(message["content"])
         else:
-            st.write("")
             st.write("")
             st.write("**thread_id:** "+thread_id)
             st.write("")
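For context, chatbot_tab.py builds the chat model from st.session_state.model, which app.py now owns entirely, so the reassignment removed before ChatMistralAI was redundant. A minimal sketch of the resulting construction follows, assuming the langchain-openai and langchain-mistralai packages; the build_chat_model wrapper, the branch condition, and the api_key wiring are inferred for illustration, not copied from the file.

# Sketch of the model construction after this commit (assumptions noted inline).
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_mistralai import ChatMistralAI

def build_chat_model():
    if st.session_state.model in ["gpt-3.5-turbo", "gpt-4o"]:
        # OpenAI models: cap creativity and output length as in the diff.
        # Passing the key stored by app.py is an assumption; the real code may
        # rely on the OPENAI_API_KEY environment variable instead.
        model = ChatOpenAI(model=st.session_state.model,
                           temperature=0.8,  # Adjust creativity level
                           max_tokens=150,   # Define max output token limit
                           api_key=st.session_state.get("OPENAI_API_KEY"))
    else:
        # st.session_state.model is already "mistral-large-latest" here,
        # which is why the removed reassignment was unnecessary.
        model = ChatMistralAI(model=st.session_state.model)
    return model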