ysharma HF staff committed on
Commit
120f827
·
verified ·
1 Parent(s): ad2abe6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +81 -2
app.py CHANGED
@@ -77,11 +77,90 @@ def like(evt: gr.LikeData):
77
  print("User liked the response")
78
  print(evt.index, evt.liked, evt.value)
79
 
80
- with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  chatbot = gr.Chatbot(type="messages", height=500, show_copy_button=True)
82
  button = gr.Button("Get San Francisco Weather")
83
  button.click(generate_response, chatbot, chatbot)
84
  chatbot.like(like)
85
 
 
 
 
 
 
 
 
 
 
86
  if __name__ == "__main__":
87
- demo.launch()
 
77
  print("User liked the response")
78
  print(evt.index, evt.liked, evt.value)
79
 
80
+
81
+ import gradio as gr
82
+ from gradio import ChatMessage
83
+ import time
84
+
85
def simulate_thinking_chat(message: str, history: list):
    """Simulate a "thinking" LLM: stream thought steps, then a final answer.

    Args:
        message: The user's message (currently unused; thoughts are canned).
        history: Chat history in gr.Chatbot "messages" format; mutated in place.

    Yields:
        The updated history after each incremental change, so the UI can
        re-render progressively.
    """
    # Start with an empty "thinking" message; the metadata title renders as a
    # collapsible header in gr.Chatbot.
    history.append(
        ChatMessage(
            role="assistant",
            content="",
            # Use the same title as the later updates so the header does not
            # visibly change mid-stream (original used "Thinking Process 💭"
            # here but "💭 Thinking Process" in the update loop).
            metadata={"title": "💭 Thinking Process"},
        )
    )
    time.sleep(0.5)
    yield history  # show the empty thinking bubble immediately

    # Canned thought steps the "LLM" works through.
    thoughts = [
        "First, I need to understand the core aspects of the query...",
        "Now, considering the broader context and implications...",
        "Analyzing potential approaches to formulate a comprehensive answer...",
        "Finally, structuring the response for clarity and completeness...",
    ]

    # Accumulate thoughts so each yield shows the full bullet list so far.
    accumulated_thoughts = ""
    for thought in thoughts:
        time.sleep(0.5)  # small delay for realism

        # \n\n creates a markdown paragraph break between bullets.
        accumulated_thoughts += f"- {thought}\n\n"

        # Replace the thinking message (last in history) with the updated list.
        history[-1] = ChatMessage(
            role="assistant",
            content=accumulated_thoughts.strip(),  # drop trailing whitespace
            metadata={"title": "💭 Thinking Process"},
        )
        yield history

    # After "thinking" completes, append the final visible answer.
    history.append(
        ChatMessage(
            role="assistant",
            content="Based on my thoughts and analysis above, my response is: This dummy repro shows how thoughts of a thinking LLM can be progressively shown before providing its final answer."
        )
    )
    yield history
133
+
134
# Demo 1: chatbot that streams a fake "thinking" phase before answering.
with gr.Blocks() as demo1:
    gr.Markdown("# Thinking LLM Demo 🤔")
    chatbot = gr.Chatbot(type="messages", render_markdown=True)
    msg = gr.Textbox(placeholder="Type your message...")

    # On submit: append the user message to the history and clear the textbox.
    # (The original lambda returned `m` back to the textbox, so the typed text
    # was never cleared and a second Enter re-submitted the same message.)
    msg.submit(
        lambda m, h: ("", h + [ChatMessage(role="user", content=m)]),
        [msg, chatbot],
        [msg, chatbot],
    ).then(
        # Then stream the assistant's thinking steps and final response.
        # simulate_thinking_chat ignores the (now-cleared) message value.
        simulate_thinking_chat,
        [msg, chatbot],
        chatbot,
    )
149
+
150
# Demo 2: button-triggered weather chat, wired to generate_response / like
# defined earlier in this file.
with gr.Blocks() as demo2:
    chatbot = gr.Chatbot(
        type="messages",
        height=500,
        show_copy_button=True,
    )
    button = gr.Button("Get San Francisco Weather")
    # Clicking streams the weather response into the chatbot.
    button.click(generate_response, chatbot, chatbot)
    # Log like/dislike feedback events on assistant messages.
    chatbot.like(like)
155
 
156
# Combine the two demos into a single tabbed app. demo1/demo2 are rendered
# inside demo_final rather than launched individually.
with gr.Blocks() as demo_final:
    with gr.Tab("First"):
        demo1.render()
    with gr.Tab("Second"):
        demo2.render()

# NOTE(review): the original also built an unused
# `demo_tabbed = gr.TabbedInterface([demo1, demo2], ["First", "Second"])`
# here. It was never launched, and it re-used Blocks that were already
# rendered into demo_final above, so it has been removed as dead code.

if __name__ == "__main__":
    demo_final.launch()