eduardo-alvarez committed
Commit
94e6643
1 Parent(s): dd7478e

temporarily removing chat /w top model feature

Files changed (1): app.py (+55 −61)
app.py CHANGED
@@ -3,12 +3,6 @@ import requests
 import os
 
 import gradio
-
-# work around due to HF Spaces bug
-#if gradio.__version__ != '4.16.0':
-#    os.system("pip uninstall -y gradio")
-#    os.system("pip install gradio==4.16.0")
-
 import gradio as gr
 
 from info.train_a_model import (
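
The workaround removed in the hunk above uninstalled and reinstalled a pinned Gradio build at runtime, per its own comment a response to an HF Spaces bug; the standard way to get the same pin without a runtime reinstall is to declare gradio==4.16.0 in the Space's requirements.txt.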
@@ -77,62 +71,62 @@ with demo:
         except Exception as e:
             return f"❌Failed to submit due to an error: {str(e)}"
 
-    with gr.Accordion("Chat with Top Models on the Leaderboard Here 💬", open=False):
-
-        chat_model_dropdown = gr.Dropdown(
-            choices=VALIDATED_CHAT_MODELS,
-            label="Select a leaderboard model to chat with. ",
-            multiselect=False,
-            value=VALIDATED_CHAT_MODELS[0],
-            interactive=True,
-        )
-
-        #chat_model_selection = chat_model_dropdown.value
-        chat_model_selection = 'yuriachermann/My_AGI_llama_2_7B'
-
-        def call_api_and_stream_response(query, chat_model):
-            """
-            Call the API endpoint and yield characters as they are received.
-            This function simulates streaming by yielding characters one by one.
-            """
-            url = inference_endpoint_url
-            params = {"query": query, "selected_model": chat_model}
-            with requests.get(url, json=params, stream=True) as r:  # Use params for query parameters
-                for chunk in r.iter_content(chunk_size=1):
-                    if chunk:
-                        yield chunk.decode()
-
-        def get_response(query, history):
-            """
-            Wrapper function to call the streaming API and compile the response.
-            """
-            response = ''
-            for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
-                if char == '<':  # This is stopping condition; adjust as needed.
-                    break
-                response += char
-            yield [(f"🤖 Response from LLM: {chat_model_selection}", response)]  # Correct format for Gradio Chatbot
+    #with gr.Accordion("Chat with Top Models on the Leaderboard Here 💬", open=False):
+    #
+    #    chat_model_dropdown = gr.Dropdown(
+    #        choices=VALIDATED_CHAT_MODELS,
+    #        label="Select a leaderboard model to chat with. ",
+    #        multiselect=False,
+    #        value=VALIDATED_CHAT_MODELS[0],
+    #        interactive=True,
+    #    )
+    #
+    #    #chat_model_selection = chat_model_dropdown.value
+    #    chat_model_selection = 'yuriachermann/My_AGI_llama_2_7B'
+    #
+    #    def call_api_and_stream_response(query, chat_model):
+    #        """
+    #        Call the API endpoint and yield characters as they are received.
+    #        This function simulates streaming by yielding characters one by one.
+    #        """
+    #        url = inference_endpoint_url
+    #        params = {"query": query, "selected_model": chat_model}
+    #        with requests.get(url, json=params, stream=True) as r:  # Use params for query parameters
+    #            for chunk in r.iter_content(chunk_size=1):
+    #                if chunk:
+    #                    yield chunk.decode()
     #
-
-        chatbot = gr.Chatbot()
-        msg = gr.Textbox()
-        submit = gr.Button("Submit")
-        clear = gr.Button("Clear")
-        def user(user_message, history):
-            return "", history + [[user_message, None]]
-        def clear_chat(*args):
-            return []  # Returning an empty list to signify clearing the chat, adjust as per Gradio's capabilities
-        submit.click(
-            fn=get_response,
-            inputs=[msg, chatbot],
-            outputs=chatbot
-        )
-        clear.click(
-            fn=clear_chat,
-            inputs=None,
-            outputs=chatbot
-        )
-
+    #    def get_response(query, history):
+    #        """
+    #        Wrapper function to call the streaming API and compile the response.
+    #        """
+    #        response = ''
+    #        for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
+    #            if char == '<':  # This is stopping condition; adjust as needed.
+    #                break
+    #            response += char
+    #        yield [(f"🤖 Response from LLM: {chat_model_selection}", response)]  # Correct format for Gradio Chatbot
+    ##
+    #
+    #    chatbot = gr.Chatbot()
+    #    msg = gr.Textbox()
+    #    submit = gr.Button("Submit")
+    #    clear = gr.Button("Clear")
+    #    def user(user_message, history):
+    #        return "", history + [[user_message, None]]
+    #    def clear_chat(*args):
+    #        return []  # Returning an empty list to signify clearing the chat, adjust as per Gradio's capabilities
+    #    submit.click(
+    #        fn=get_response,
+    #        inputs=[msg, chatbot],
+    #        outputs=chatbot
+    #    )
+    #    clear.click(
+    #        fn=clear_chat,
+    #        inputs=None,
+    #        outputs=chatbot
+    #    )
+    #
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏆 LLM Leaderboard", elem_id="llm-benchmark-table", id=0):
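
The chat feature is only commented out, not deleted, so it can be restored by stripping the leading # markers. The pattern it implements is a character-level streaming client over a plain HTTP GET. Below is a minimal standalone sketch of that same pattern; the endpoint URL here is a hypothetical stand-in for inference_endpoint_url, which is defined elsewhere in app.py and not shown in this diff.

import requests

# Hypothetical URL standing in for inference_endpoint_url in app.py.
ENDPOINT_URL = "https://example.com/chat"

def stream_chat(query: str, model: str):
    """Yield the model's reply one character at a time, mirroring the
    commented-out call_api_and_stream_response above."""
    payload = {"query": query, "selected_model": model}
    # The original sends the parameters as a JSON body on a GET request,
    # despite its inline comment about query parameters.
    with requests.get(ENDPOINT_URL, json=payload, stream=True) as r:
        r.raise_for_status()
        for chunk in r.iter_content(chunk_size=1):
            if chunk:
                # chunk_size=1 can split multi-byte UTF-8 characters;
                # the original code shares this caveat.
                yield chunk.decode()

if __name__ == "__main__":
    reply = ""
    for char in stream_chat("Hello!", "yuriachermann/My_AGI_llama_2_7B"):
        if char == "<":  # same ad-hoc stop condition as get_response above
            break
        reply += char
    print(reply)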