openfree committed
Commit 0997082
1 Parent(s): ecb9d75

Update app.py

Files changed (1):
  1. app.py +167 -42
app.py CHANGED
@@ -1,64 +1,189 @@
  import gradio as gr
  from huggingface_hub import InferenceClient

- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


- def respond(
      message,
-     history: list[tuple[str, str]],
      system_message,
      max_tokens,
      temperature,
      top_p,
  ):
      messages = [{"role": "system", "content": system_message}]

-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})

-     messages.append({"role": "user", "content": message})

-     response = ""

-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content

-         response += token
-         yield response


- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )


  if __name__ == "__main__":
-     demo.launch()

  import gradio as gr
  from huggingface_hub import InferenceClient
+ import concurrent.futures

+ # Available LLM models
+ LLM_MODELS = {
+     "Llama-3.3": "meta-llama/Llama-3.3-70B-Instruct",
+     "QwQ-32B": "Qwen/QwQ-32B-Preview",
+     "C4AI-Command": "CohereForAI/c4ai-command-r-plus-08-2024",
+     "Marco-o1": "AIDC-AI/Marco-o1",
+     "Qwen2.5": "Qwen/Qwen2.5-72B-Instruct",
+     "Mistral-Nemo": "mistralai/Mistral-Nemo-Instruct-2407",
+     "Nemotron-70B": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"
+ }

+ # Default selected models
+ DEFAULT_MODELS = [
+     "meta-llama/Llama-3.3-70B-Instruct",
+     "CohereForAI/c4ai-command-r-plus-08-2024",
+     "mistralai/Mistral-Nemo-Instruct-2407"
+ ]

+ # One streaming client per model, created once at startup
+ clients = {model: InferenceClient(model) for model in LLM_MODELS.values()}
+
+ def process_file(file):
+     if file is None:
+         return ""
+     if file.name.endswith(('.txt', '.md')):
+         # file.name is the temporary path Gradio provides; read it as UTF-8 text
+         with open(file.name, 'r', encoding='utf-8') as f:
+             return f.read()
+     return f"Uploaded file: {file.name}"
+
+ def respond_single(
+     client,
      message,
+     history,
      system_message,
      max_tokens,
      temperature,
      top_p,
  ):
      messages = [{"role": "system", "content": system_message}]
+
+     for user, assistant in history:
+         if user:
+             messages.append({"role": "user", "content": user})
+         if assistant:
+             messages.append({"role": "assistant", "content": assistant})
+
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+     try:
+         for msg in client.chat_completion(
+             messages,
+             max_tokens=max_tokens,
+             stream=True,
+             temperature=temperature,
+             top_p=top_p,
+         ):
+             token = msg.choices[0].delta.content
+             if token:  # the last stream chunk can carry a None delta
+                 response += token
+                 yield response
+     except Exception as e:
+         yield f"Error: {str(e)}"

+ def respond_all(
+     message,
+     file,
+     history1,
+     history2,
+     history3,
+     selected_models,
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+ ):
+     if file:
+         file_content = process_file(file)
+         message = f"{message}\n\nFile content:\n{file_content}"

+     # Pad the selection so each of the three chat columns gets a model
+     if not selected_models:
+         selected_models = DEFAULT_MODELS.copy()
+     while len(selected_models) < 3:
+         selected_models.append(selected_models[-1])

+     def generate(client, history):
+         return respond_single(
+             client,
+             message,
+             history,
+             system_message,
+             max_tokens,
+             temperature,
+             top_p,
+         )

+     return (
+         generate(clients[selected_models[0]], history1),
+         generate(clients[selected_models[1]], history2),
+         generate(clients[selected_models[2]], history3),
+     )

+ with gr.Blocks() as demo:
+     with gr.Row():
+         model_choices = gr.CheckboxGroup(
+             choices=list(LLM_MODELS.values()),
+             value=DEFAULT_MODELS,
+             label="Select Models (Choose up to 3)",
+             interactive=True
+         )

+     with gr.Row():
+         with gr.Column():
+             chat1 = gr.ChatInterface(
+                 lambda message, history: None,
+                 chatbot=gr.Chatbot(height=400, label="Chat 1"),
+                 textbox=False,
+             )
+         with gr.Column():
+             chat2 = gr.ChatInterface(
+                 lambda message, history: None,
+                 chatbot=gr.Chatbot(height=400, label="Chat 2"),
+                 textbox=False,
+             )
+         with gr.Column():
+             chat3 = gr.ChatInterface(
+                 lambda message, history: None,
+                 chatbot=gr.Chatbot(height=400, label="Chat 3"),
+                 textbox=False,
+             )

+     with gr.Row():
+         with gr.Column():
+             system_message = gr.Textbox(
+                 value="You are a friendly Chatbot.",
+                 label="System message"
+             )
+             max_tokens = gr.Slider(
+                 minimum=1,
+                 maximum=2048,
+                 value=512,
+                 step=1,
+                 label="Max new tokens"
+             )
+             temperature = gr.Slider(
+                 minimum=0.1,
+                 maximum=4.0,
+                 value=0.7,
+                 step=0.1,
+                 label="Temperature"
+             )
+             top_p = gr.Slider(
+                 minimum=0.1,
+                 maximum=1.0,
+                 value=0.95,
+                 step=0.05,
+                 label="Top-p"
+             )
+
+     with gr.Row():
+         file_input = gr.File(label="Upload File (optional)")
+         msg_input = gr.Textbox(
+             show_label=False,
+             placeholder="Enter text and press enter",
+             container=False
+         )
+
+     def submit_message(message, file):
+         return respond_all(
+             message,
+             file,
+             chat1.chatbot.value,
+             chat2.chatbot.value,
+             chat3.chatbot.value,
+             model_choices.value,
+             system_message.value,
+             max_tokens.value,
+             temperature.value,
+             top_p.value,
+         )

+     msg_input.submit(
+         submit_message,
+         [msg_input, file_input],
+         [chat1.chatbot, chat2.chatbot, chat3.chatbot],
+         api_name="submit"
+     )

  if __name__ == "__main__":
+     demo.launch()
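
The new code imports concurrent.futures but never calls it, and respond_all hands back three independent streaming generators rather than finished strings. As a minimal sketch of how that import could be put to work, the drain_parallel helper below runs each generator to exhaustion on its own thread and collects the final accumulated responses; the helper and its name are hypothetical, not part of the commit.

import concurrent.futures

def drain_parallel(generators):
    # Run each streaming generator on its own thread and return the last
    # value it yields, i.e. the fully accumulated response text.
    def last_value(gen):
        result = ""
        for partial in gen:  # respond_single yields the running response
            result = partial
        return result

    with concurrent.futures.ThreadPoolExecutor(max_workers=len(generators)) as pool:
        futures = [pool.submit(last_value, g) for g in generators]
        return [f.result() for f in futures]

# Hypothetical usage:
#   gen1, gen2, gen3 = respond_all(message, None, [], [], [], DEFAULT_MODELS,
#                                  "You are a friendly Chatbot.", 512, 0.7, 0.95)
#   final1, final2, final3 = drain_parallel([gen1, gen2, gen3])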
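
Since respond_single is a plain generator function, it can also be exercised outside the Blocks UI. A hedged smoke test, assuming one of the listed model endpoints is reachable and a valid Hugging Face token is configured in the environment:

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
for partial in respond_single(
    client,
    message="Explain top-p sampling in one sentence.",
    history=[],
    system_message="You are a friendly Chatbot.",
    max_tokens=64,
    temperature=0.7,
    top_p=0.95,
):
    print(partial)  # prints the response accumulated so far on each chunk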
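
The checkbox label says "Choose up to 3", but nothing in the commit enforces that cap; respond_all simply reads the first three entries. One possible guard, sketched with the documented CheckboxGroup.change event and gr.update (the clamp_selection name is hypothetical):

def clamp_selection(selected):
    # Keep at most the first three checked models and push the trimmed
    # list back into the CheckboxGroup.
    return gr.update(value=selected[:3])

# Hypothetical wiring, inside the `with gr.Blocks() as demo:` scope:
#   model_choices.change(clamp_selection, inputs=model_choices, outputs=model_choices)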