Canstralian committed on
Commit
d51c33c
·
verified ·
1 Parent(s): 348ce2c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -19
app.py CHANGED
@@ -2,46 +2,60 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from typing import List, Tuple
4
 
5
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
6
 
 
 
 
 
 
 
7
  def respond(
8
  message: str,
9
  history: List[Tuple[str, str]],
10
- system_message: str,
11
  max_tokens: int,
12
  temperature: float,
13
  top_p: float,
14
  ):
 
15
  messages = [{"role": "system", "content": system_message}]
16
-
17
- # Add conversation history to the messages
18
  for val in history:
19
  if val[0]:
20
  messages.append({"role": "user", "content": val[0]})
21
  if val[1]:
22
  messages.append({"role": "assistant", "content": val[1]})
23
 
24
- # Append the new message to the conversation
25
  messages.append({"role": "user", "content": message})
26
 
27
- response = ""
28
-
29
- # Stream the response from the model
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message['choices'][0]['delta']['content']
38
- response += token
39
- yield response
 
 
 
 
 
 
 
 
40
 
 
41
  demo = gr.Interface(
42
  fn=respond,
43
  inputs=[
44
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
45
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
46
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
47
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
@@ -50,5 +64,6 @@ demo = gr.Interface(
50
  outputs=[gr.Textbox(label="Response")]
51
  )
52
 
 
53
  if __name__ == "__main__":
54
  demo.launch()
 
2
  from huggingface_hub import InferenceClient
3
  from typing import List, Tuple
4
 
5
# Model served through the Hugging Face Inference API.
MODEL_ID = "microsoft/phi-4"

# Shared inference client used by the chat handler below.
client = InferenceClient(MODEL_ID)
8
# Default system prompt prepended to every conversation.
# NOTE: the original assigned a plain "..." literal spanning multiple physical
# lines, which is an unterminated string literal (SyntaxError). Implicit
# concatenation of adjacent literals keeps the same text on one logical line.
system_message = (
    "You're an advanced AI assistant designed to engage in friendly and "
    "informative conversations. Your role is to respond to user queries with "
    "helpful, clear, and concise answers, while maintaining a conversational "
    "tone. You can provide advice, explanations, and solutions based on user "
    "input."
)
# Streaming chat handler wired into the Gradio interface below.
def respond(
    message: str,
    history: List[Tuple[str, str]],
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """Stream a chat completion for *message* given prior conversation turns.

    Parameters
    ----------
    message : str
        The new user message.
    history : list of (user, assistant) string pairs
        Prior conversation turns; empty strings are skipped.
    max_tokens, temperature, top_p
        Sampling parameters forwarded to the model.

    Yields
    ------
    str
        The accumulated response text after each streamed token, so the UI
        can render incremental output. On failure, yields a single
        user-facing error string instead of raising.
    """
    # Construct the message list: system prompt first, then replayed history.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    # Append the new user message last.
    messages.append({"role": "user", "content": message})

    response = ""
    try:
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # NOTE(review): current huggingface_hub releases stream dataclass
            # chunks (chunk.choices[0].delta.content); the original's
            # dict-style chunk['choices'] access raises TypeError on those.
            # Support both shapes defensively.
            try:
                choices = chunk["choices"]
                token = (
                    choices[0].get("delta", {}).get("content", "")
                    if choices
                    else ""
                )
            except TypeError:
                choices = getattr(chunk, "choices", None)
                delta = choices[0].delta if choices else None
                token = getattr(delta, "content", None) or ""
            # Skip empty/keep-alive chunks rather than yielding an error:
            # the original's per-chunk error yield clobbered the accumulated
            # response mid-stream for any chunk without choices.
            if token:
                response += token
                yield response
    except Exception as e:
        # Surface a friendly message instead of crashing the UI.
        print(f"An error occurred: {e}")
        yield "Error: An unexpected error occurred while processing your request."
53
 
54
+ # Define the Gradio Interface
55
  demo = gr.Interface(
56
  fn=respond,
57
  inputs=[
58
+ gr.Textbox(value=system_message, label="System message"),
59
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
60
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
61
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
 
64
  outputs=[gr.Textbox(label="Response")]
65
  )
66
 
67
def _main() -> None:
    """Start the Gradio app with default server settings."""
    demo.launch()


# Launch only when executed as a script, not on import.
if __name__ == "__main__":
    _main()