OuroborosM committed on
Commit 141ce89 · 1 Parent(s): febac0d

add callback

Files changed (1)
  1. app.py +140 -2
app.py CHANGED
@@ -1,4 +1,6 @@
  # from typing import Any, Coroutine
+ from uuid import UUID
+ from langchain.schema.agent import AgentAction, AgentFinish
  import openai
  import os
  # from langchain.vectorstores import Chroma
@@ -40,7 +42,7 @@ import gradio as gr
  import time

  import glob
- from typing import Any, List, Mapping, Optional
+ from typing import Any, Dict, List, Mapping, Optional
  from multiprocessing import Pool
  from tqdm import tqdm
  from pygame import mixer
@@ -94,6 +96,59 @@ from interpreter.code_interpreter import CodeInterpreter

  import regex

+ from langchain.callbacks.base import BaseCallbackHandler
+ from collections.abc import Generator
+ from queue import Queue, Empty
+ from threading import Thread
+
+
+ class QueueCallback(BaseCallbackHandler):
+     """Callback handler for streaming LLM responses to a queue."""
+
+     def __init__(self, q):
+         self.q = q
+
+     def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
+         self.q.put(token)
+
+     def on_agent_finish(self, finish: AgentFinish, *, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any) -> Any:
+         self.q.put(super().on_agent_finish(finish, run_id=run_id, parent_run_id=parent_run_id, **kwargs))
+
+     def on_agent_action(self, action: AgentAction, *, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any) -> Any:
+         self.q.put(super().on_agent_action(action, run_id=run_id, parent_run_id=parent_run_id, **kwargs))
+
+     def on_llm_end(self, *args, **kwargs: Any) -> None:
+         return self.q.empty()
+
+ def stream(input_text) -> Generator:
+     # Create a Queue
+     q = Queue()
+     job_done = object()
+
+     # Create a function to call - this will run in a thread
+
+     def task():
+         resp = agent.run(input_text, callbacks=[QueueCallback(q)])
+         q.put(job_done)
+
+     # Create a thread and start the function
+     t = Thread(target=task)
+     t.start()
+
+     content = ""
+
+     # Get each new token from the queue and yield for our generator
+     while True:
+         try:
+             next_token = q.get(True, timeout=1)
+             if next_token is job_done:
+                 break
+             content += next_token
+             yield next_token, content
+         except Empty:
+             continue
+
+
  global CurrentAgent
  CurrentAgent = os.environ["agent_type"]

@@ -1549,13 +1604,94 @@ def chathmi4(message, history2):
      # yield ["", history2, "SUBMIT", "STOP"]
      try:
          if agent is not None:
+
              response = agent.run(message)
+             # test callback
+
+             # temp = []
+             # for next_token, content in stream(message):
+             #     temp = temp + content
+             #     history_int = history2 + [(None, temp)]
+             #     yield(None, history_int, None, None)
          elif agent is None:
              response = asyncio.run(start_playwright(message))

          time.sleep(0.1)
          history2 = history2 + [(None, response)]
          yield ["", history2, gr.update(visible = True), gr.update(visible = False)]
+
+
+         # yield ["", history2, None, None]
+         print ("response of chatbot:", response)
+         # real_content = response[-1:]
+         # print("real_content", real_content)
+         # try:
+         #     # temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
+         #     file_name = CheckFileinResp(response)
+         #     print("file_name:", file_name)
+         #     if file_name != "N/A":
+         #         history2 = history2 + [(None, (file_name,))]
+         #         Filename_Chatbot = file_name
+         #         yield ["", history2, "SUBMIT", "STOP"]
+         # except Exception as e:
+         #     print("No need to add file in chatbot:", e)
+
+         try:
+             # temp = response.split("(sandbox:/")[1] # (sandbox:/sample-20230805-0807.wav)
+             file_names = CheckFileinResp(response)
+             print("file_name:", file_names)
+             if file_names != []:
+                 for file_name in file_names:
+                     if file_name != "":
+                         history2 = history2 + [(None, (file_name, file_name))]
+                         Filename_Chatbot = file_name
+                         yield ["", history2, "SUBMIT", "STOP"]
+             else:
+                 print("No File Found in Response")
+         except Exception as e:
+             print("No need to add file in chatbot:", e)
+
+
+
+     except Exception as e:
+         print("chathmi4 error:", e)
+
+     # history = history + [(message, None)]
+
+     print("History2: ", history2)
+     print("-" * 20)
+     print("-" * 20)
+
+ def chathmi5(message, history2):
+     global last_request
+     global Filename_Chatbot
+     global agent
+     # global ChatbotHistory
+     print("Input Message:", message)
+
+     last_request = message
+     history2 = history2 + [(message, None)]
+     yield ["", history2, gr.update(visible = False), gr.update(visible = True)]
+     # yield ["", history2, "SUBMIT", "STOP"]
+     try:
+         if agent is not None:
+
+             # response = agent.run(message)
+             # test callback
+
+             temp = ""
+             for next_token, content in stream(message):
+                 temp = temp + next_token
+                 history_int = history2 + [(None, temp)]
+                 yield ["", history_int, None, None]
+         elif agent is None:
+             response = asyncio.run(start_playwright(message))
+
+         time.sleep(0.1)
+         history2 = history_int
+         yield ["", history2, gr.update(visible = True), gr.update(visible = False)]
+
+
      # yield ["", history2, None, None]
      print ("response of chatbot:", response)
      # real_content = response[-1:]
@@ -1597,6 +1733,7 @@ def chathmi4(message, history2):
      print("-" * 20)
      print("-" * 20)

+
  def chatremote(message, history2):
      global last_request
      global Filename_Chatbot
@@ -1987,7 +2124,8 @@ with gr.Blocks() as demo:
      # then(HMI_Wait, None, [submit_button, stop_button])
      # inf4 = inputtext.submit(chathmi4, [inputtext, chatbot], [inputtext, chatbot, submit_button, stop_button])
      ''' open ai | new'''
-     inf4 = inputtext.submit(chathmi4, [inputtext, chatbot], [inputtext, chatbot, submit_button, stop_button]).\
+     # chathmi4 = normal, chathmi5 = callback
+     inf4 = inputtext.submit(chathmi5, [inputtext, chatbot], [inputtext, chatbot, submit_button, stop_button]).\
          success(playsound1, None, voice_output, queue=True)#.\
      # success(ClearAudio, None, voice_output)

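
Note on the pattern: the new QueueCallback/stream() pair is a producer/consumer hand-off. The agent runs in a worker thread and pushes each token onto a Queue from the callback; a generator on the main thread drains the queue and yields partial text so the Gradio handler (chathmi5) can update the chatbot incrementally. Below is a minimal, self-contained sketch of that flow in plain Python, without LangChain or Gradio; fake_llm and stream_tokens are illustrative names only and are not part of app.py.

from queue import Queue, Empty
from threading import Thread

def fake_llm(prompt, on_token):
    # Stand-in for agent.run(...): emits a few tokens through a callback.
    for token in ("Echo", ":", " ", prompt):
        on_token(token)

def stream_tokens(prompt):
    q = Queue()
    job_done = object()  # sentinel marking the end of the stream

    def task():
        # Producer: run the "LLM" in a worker thread, pushing tokens onto the queue.
        fake_llm(prompt, on_token=q.put)
        q.put(job_done)

    Thread(target=task, daemon=True).start()

    content = ""
    while True:
        try:
            token = q.get(block=True, timeout=1)
        except Empty:
            continue  # nothing arrived yet, keep polling
        if token is job_done:
            break
        content += token
        yield token, content  # same (new_token, accumulated_text) shape as stream()

if __name__ == "__main__":
    for token, content in stream_tokens("hi"):
        print(repr(token), "->", repr(content))

In app.py the producer side is QueueCallback.on_llm_new_token and the consumer is stream(), which chathmi5 iterates to build up the chatbot reply token by token.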