suayptalha committed on
Commit
0c4ca35
·
verified ·
1 Parent(s): 72b0a8e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -63
app.py CHANGED
@@ -2,11 +2,9 @@ import gradio as gr
2
  from gradio_client import Client, handle_file
3
  from huggingface_hub import InferenceClient
4
 
5
- # Clients for different models
6
  moondream_client = Client("vikhyatk/moondream2")
7
  qwq_client = InferenceClient("Qwen/QwQ-32B-Preview")
8
 
9
- # Function to describe the image
10
  def describe_image(image, user_message):
11
  result = moondream_client.predict(
12
  img=handle_file(image),
@@ -15,6 +13,7 @@ def describe_image(image, user_message):
15
  )
16
 
17
  description = result
 
18
  user_message = description + "\n" + user_message
19
 
20
  qwq_result = qwq_client.chat_completion(
@@ -26,68 +25,26 @@ def describe_image(image, user_message):
26
 
27
  return qwq_result['choices'][0]['message']['content']
28
 
29
- # Function to handle the chat or image inputs
30
- def respond(message, history, system_message, max_tokens, temperature, top_p):
31
- messages = [{"role": "system", "content": system_message}]
32
-
33
- for val in history:
34
- if val[0]:
35
- messages.append({"role": "user", "content": val[0]})
36
- if val[1]:
37
- messages.append({"role": "assistant", "content": val[1]})
38
-
39
- messages.append({"role": "user", "content": message})
40
-
41
- response = ""
42
- for message in qwq_client.chat_completion(
43
- messages,
44
- max_tokens=max_tokens,
45
- stream=True,
46
- temperature=temperature,
47
- top_p=top_p,
48
- ):
49
- token = message['choices'][0]['delta'].get('content', '')
50
- response += token
51
- yield response
52
-
53
- # Function to handle both image and chat input
54
- def chat_or_image(image, user_message, history, system_message, max_tokens, temperature, top_p):
55
  if image:
56
- response = describe_image(image, user_message)
57
- history.append((user_message, response)) # Keep history
58
- return response, history
59
  else:
60
- return respond(user_message, history, system_message, max_tokens, temperature, top_p)
61
-
62
- # Gradio Blocks setup
63
- with gr.Blocks() as demo:
64
- with gr.Row():
65
- with gr.Column():
66
- image_input = gr.Image(type="filepath", label="Upload image (Optional)")
67
- user_input = gr.Textbox(label="Ask anything", placeholder="Ask...", lines=2)
68
- system_message_input = gr.Textbox(value="You are a friendly assistant.", label="System message")
69
- max_tokens_input = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
70
- temperature_input = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
71
- top_p_input = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
72
- submit_button = gr.Button("Submit")
73
-
74
- with gr.Column():
75
- response_output = gr.Textbox(label="Response", lines=4, interactive=False)
76
-
77
- # Handle button click to trigger the callback
78
- submit_button.click(
79
- chat_or_image,
80
- inputs=[
81
- image_input,
82
- user_input,
83
- gr.State([]), # History state to keep track of the conversation
84
- system_message_input,
85
- max_tokens_input,
86
- temperature_input,
87
- top_p_input
88
- ],
89
- outputs=[response_output, gr.State([])], # Outputs: the response and updated history
90
- )
91
 
92
  if __name__ == "__main__":
93
- demo.launch(show_error=True)
 
2
  from gradio_client import Client, handle_file
3
  from huggingface_hub import InferenceClient
4
 
 
5
  moondream_client = Client("vikhyatk/moondream2")
6
  qwq_client = InferenceClient("Qwen/QwQ-32B-Preview")
7
 
 
8
  def describe_image(image, user_message):
9
  result = moondream_client.predict(
10
  img=handle_file(image),
 
13
  )
14
 
15
  description = result
16
+
17
  user_message = description + "\n" + user_message
18
 
19
  qwq_result = qwq_client.chat_completion(
 
25
 
26
  return qwq_result['choices'][0]['message']['content']
27
 
28
def chat_or_image(image, user_message):
    """Route a request to the right model.

    If an image path is supplied, delegate to describe_image (vision model
    first, then chat). Otherwise send the text prompt straight to the QwQ
    chat model with fixed sampling settings and return its reply text.
    """
    # Guard clause: image requests take the vision path.
    if image:
        return describe_image(image, user_message)

    # Text-only path: single-turn chat completion.
    completion = qwq_client.chat_completion(
        messages=[{"role": "user", "content": user_message}],
        max_tokens=512,
        temperature=0.7,
        top_p=0.95
    )
    return completion['choices'][0]['message']['content']
39
+
40
# Single-turn UI: an optional image upload plus a free-form text prompt,
# both fed to chat_or_image; the model's reply is shown as plain text.
image_input = gr.Image(type="filepath", label="Upload image (Optional)")
prompt_input = gr.Textbox(label="Ask anything", placeholder="Ask...", lines=2)

demo = gr.Interface(
    fn=chat_or_image,
    inputs=[image_input, prompt_input],
    outputs="text",
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
if __name__ == "__main__":
    # Launch the Gradio app; show_error surfaces tracebacks in the UI,
    # which helps debug remote-inference failures.
    demo.launch(show_error=True)