Pra-tham commited on
Commit
5a97268
·
1 Parent(s): 001a336
Files changed (1) hide show
  1. app.py +35 -16
app.py CHANGED
@@ -1,12 +1,23 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
-
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
 
 
 
 
 
9
 
 
 
 
 
10
  def respond(
11
  message,
12
  history: list[tuple[str, str]],
@@ -42,22 +53,30 @@ def respond(
42
  """
43
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
44
  """
45
- demo = gr.ChatInterface(
46
- respond,
47
- additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
50
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
- gr.Slider(
52
- minimum=0.1,
53
- maximum=1.0,
54
- value=0.95,
55
- step=0.05,
56
- label="Top-p (nucleus sampling)",
57
- ),
58
- ],
59
- )
60
 
 
 
 
 
 
 
 
61
 
 
62
  if __name__ == "__main__":
63
  demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ from model import *
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
def evaluate_response(problem):
    """Generate a model answer for a user-supplied question.

    Wraps *problem* in a single-turn chat message, renders it with the
    model's chat template, runs the text-generation ``pipeline``, and
    returns the generated text.

    Args:
        problem: The question, as ``str`` or UTF-8 encoded ``bytes``.
            (Gradio's Textbox delivers ``str``; the original code assumed
            ``bytes`` and crashed on plain strings.)

    Returns:
        str: The model's generated answer.
    """
    # Bug fix: only decode when we actually received bytes — the original
    # unconditional .decode('utf-8') raises AttributeError on str input,
    # e.g. "what is angle x if angles y and z are 60 degrees in a triangle".
    if isinstance(problem, bytes):
        problem = problem.decode("utf-8")

    messages = [{"role": "user", "content": problem}]
    # NOTE(review): `tokenizer` and `pipeline` come from `from model import *`
    # — presumably a HF tokenizer and text-generation pipeline; confirm there.
    query_prompt = tokenizer.apply_chat_template(messages, tokenize=False)
    raw_output = pipeline(
        query_prompt,
        max_new_tokens=2048,
        do_sample=True,
        temperature=0.9,
        return_full_text=False,  # only the completion, not the echoed prompt
    )
    return raw_output[0]["generated_text"]
20
+
21
  def respond(
22
  message,
23
  history: list[tuple[str, str]],
 
53
  """
54
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
55
  """
56
+ # demo = gr.ChatInterface(
57
+ # evaluate_response,
58
+ # additional_inputs=[
59
+ # gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
60
+ # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
61
+ # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
62
+ # gr.Slider(
63
+ # minimum=0.1,
64
+ # maximum=1.0,
65
+ # value=0.95,
66
+ # step=0.05,
67
+ # label="Top-p (nucleus sampling)",
68
+ # ),
69
+ # ],
70
+ # )
71
 
72
# Single-turn Q&A UI: one question textbox in, one answer textbox out.
question_box = gr.Textbox(label="Question")
answer_box = gr.Textbox(label="Answer")

demo = gr.Interface(
    fn=evaluate_response,
    inputs=[question_box],
    outputs=answer_box,
    title="Question and Answer Interface",
    description="Enter a question.",
)
79
 
80
+
81
# Launch the Gradio app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()