Bofandra committed
Commit 291088b · verified · 1 Parent(s): a1544d7

Update app.py

Files changed (1): app.py (+10, -15)
app.py CHANGED
@@ -8,21 +8,17 @@ import httpcore
 import pickle
 setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 def get_detailed_instruct(task_description: str, query: str) -> str:
     return f'Instruct: {task_description}\nQuery: {query}'
 
-def respond(
-    message,
+def respond(message,
     history: list[tuple[str, str]],
     max_tokens = 2048,
     temperature = 0.7,
     top_p = 0.95,
-):
+):
     #system role
     messages = [{"role": "system", "content": "You are a sunni moslem bot that always give answer based on quran, hadith, and the companions of prophet Muhammad!"}]
 
@@ -119,12 +115,9 @@ def respond(
             yield translated_response
         except:
             yield ""
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
+
+demo = gr.Interface(
+    fn=respond,
     additional_inputs=[
         gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
@@ -136,13 +129,15 @@ demo = gr.ChatInterface(
             label="Top-p (nucleus sampling)",
         ),
     ],
+    inputs="textbox",
+    outputs="textbox",
     cache_examples="lazy",
     examples=[
         ["Why is men created?"],
         ["Please tell me about superstition!"],
         ["How moses defeat pharaoh?"],
     ],
-)
-
+    title="Moslem Bot")
+
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
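
For context, a minimal, self-contained sketch of the gr.Interface wiring this commit switches to, with a stub standing in for the committed respond() (the InferenceClient call, retrieval, and translation are omitted). The Top-p slider's range is an assumption (only its label appears in the hunk), and the sketch assumes a Gradio version whose gr.Interface accepts additional_inputs.

import gradio as gr

def respond_stub(message, max_tokens=2048, temperature=0.7, top_p=0.95):
    # Stand-in for the committed respond(): echoes the sampling settings
    # instead of calling the model and the translator.
    return f"{message} (max_tokens={max_tokens}, temperature={temperature}, top_p={top_p})"

demo = gr.Interface(
    fn=respond_stub,
    inputs="textbox",
    outputs="textbox",
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        # Range assumed; the commit only shows this slider's label.
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    title="Moslem Bot",
)

if __name__ == "__main__":
    demo.launch()

One caveat: gr.Interface passes the textbox value followed by the additional_inputs values positionally, and unlike gr.ChatInterface it supplies no history argument, so in the committed signature the first slider's value would presumably bind to the history parameter. The stub above drops history for that reason.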