eduardo-alvarez committed on
Commit
0b693ee
1 Parent(s): 1a89a1c

disabling chat logic until backend is configured

Browse files
Files changed (2) hide show
  1. __pycache__/app.cpython-38.pyc +0 -0
  2. app.py +27 -27
__pycache__/app.cpython-38.pyc CHANGED
Binary files a/__pycache__/app.cpython-38.pyc and b/__pycache__/app.cpython-38.pyc differ
 
app.py CHANGED
@@ -50,33 +50,33 @@ with demo:
50
  #chat_model_selection = chat_model_dropdown.value
51
  chat_model_selection = 'Intel/neural-chat-7b-v1-1'
52
 
53
- def call_api_and_stream_response(query, chat_model):
54
- """
55
- Call the API endpoint and yield characters as they are received.
56
- This function simulates streaming by yielding characters one by one.
57
- """
58
- url = "http://localhost:5004/query-stream/"
59
- params = {"query": query,"selected_model":chat_model}
60
- with requests.get(url, json=params, stream=True) as r:
61
- for chunk in r.iter_content(chunk_size=1):
62
- if chunk:
63
- yield chunk.decode()
64
-
65
- def get_response(query, history):
66
- """
67
- Wrapper function to call the streaming API and compile the response.
68
- """
69
- response = ''
70
-
71
- global chat_model_selection
72
-
73
- for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
74
- if char == '<':
75
- break
76
- response += char
77
- yield response
78
-
79
- gr.ChatInterface(get_response, retry_btn = None, undo_btn=None, theme=gr.themes.Soft(), concurrency_limit=5).launch()
80
 
81
 
82
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
 
50
  #chat_model_selection = chat_model_dropdown.value
51
  chat_model_selection = 'Intel/neural-chat-7b-v1-1'
52
 
53
+ #def call_api_and_stream_response(query, chat_model):
54
+ # """
55
+ # Call the API endpoint and yield characters as they are received.
56
+ # This function simulates streaming by yielding characters one by one.
57
+ # """
58
+ # url = "http://localhost:5004/query-stream/"
59
+ # params = {"query": query,"selected_model":chat_model}
60
+ # with requests.get(url, json=params, stream=True) as r:
61
+ # for chunk in r.iter_content(chunk_size=1):
62
+ # if chunk:
63
+ # yield chunk.decode()
64
+ #
65
+ #def get_response(query, history):
66
+ # """
67
+ # Wrapper function to call the streaming API and compile the response.
68
+ # """
69
+ # response = ''
70
+ #
71
+ # global chat_model_selection
72
+ #
73
+ # for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
74
+ # if char == '<':
75
+ # break
76
+ # response += char
77
+ # yield response
78
+ #
79
+ #gr.ChatInterface(get_response, retry_btn = None, undo_btn=None, concurrency_limit=5).launch()
80
 
81
 
82
  with gr.Tabs(elem_classes="tab-buttons") as tabs: