Adam-Ben-Khalifa commited on
Commit
e523e8e
·
verified ·
1 Parent(s): d92fd8f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -19
app.py CHANGED
@@ -13,7 +13,7 @@ For more information on `huggingface_hub` Inference API support, please check th
13
  # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
14
 
15
 
16
- def advancedPromptPipeline(InputPrompt):
17
 
18
  model="gpt-4o-mini"
19
 
@@ -27,20 +27,21 @@ def advancedPromptPipeline(InputPrompt):
27
  enhancer = PromptEnhancer(model)
28
 
29
  start_time = time.time()
30
- advanced_prompt = enhancer.enhance_prompt(input_prompt, perform_eval=False)
31
  elapsed_time = time.time() - start_time
32
 
33
 
34
- yield advanced_prompt["advanced_prompt"]
35
- #return {
36
- #"model": model,
37
- #"elapsed_time": elapsed_time,
38
- #"prompt_tokens": enhancer.prompt_tokens,
39
- #"completion_tokens": enhancer.completion_tokens,
40
- #"approximate_cost": (enhancer.prompt_tokens*i_cost)+(enhancer.completion_tokens*o_cost),
41
- #"inout_prompt": input_prompt,
42
- #"advanced_prompt": advanced_prompt["advanced_prompt"],
43
- #}
 
44
 
45
 
46
  def respond(
@@ -60,13 +61,9 @@ def respond(
60
  # messages.append({"role": "assistant", "content": val[1]})
61
  #
62
  #messages.append({"role": "user", "content": message})
63
-
64
- messages = []
65
 
66
  response = ""
67
 
68
- advancedPromptPipeline(f"{message}")
69
-
70
  #for message in client.chat_completion(
71
  # messages,
72
  # max_tokens=max_tokens,
@@ -82,9 +79,9 @@ def respond(
82
  """
83
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
84
  """
85
- demo = gr.ChatInterface(
86
  #advancedPromptPipeline,
87
- respond,
88
  #additional_inputs=[
89
  #gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
90
  #gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
@@ -92,7 +89,9 @@ demo = gr.ChatInterface(
92
  #gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)",
93
  # ),
94
  #],
95
- )
 
 
96
 
97
 
98
  if __name__ == "__main__":
 
13
  # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
14
 
15
 
16
+ async def advancedPromptPipeline(input_prompt):
17
 
18
  model="gpt-4o-mini"
19
 
 
27
  enhancer = PromptEnhancer(model)
28
 
29
  start_time = time.time()
30
+ advanced_prompt = await enhancer.enhance_prompt(input_prompt, perform_eval=False)
31
  elapsed_time = time.time() - start_time
32
 
33
 
34
+ """return {
35
+ "model": model,
36
+ "elapsed_time": elapsed_time,
37
+ "prompt_tokens": enhancer.prompt_tokens,
38
+ "completion_tokens": enhancer.completion_tokens,
39
+ "approximate_cost": (enhancer.prompt_tokens*i_cost)+(enhancer.completion_tokens*o_cost),
40
+ "input_prompt": input_prompt,
41
+ "advanced_prompt": advanced_prompt["advanced_prompt"],
42
+ }"""
43
+
44
+ return advanced_prompt["advanced_prompt"]
45
 
46
 
47
  def respond(
 
61
  # messages.append({"role": "assistant", "content": val[1]})
62
  #
63
  #messages.append({"role": "user", "content": message})
 
 
64
 
65
  response = ""
66
 
 
 
67
  #for message in client.chat_completion(
68
  # messages,
69
  # max_tokens=max_tokens,
 
79
  """
80
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
81
  """
82
+ #demo = gr.ChatInterface(
83
  #advancedPromptPipeline,
84
+ # respond,
85
  #additional_inputs=[
86
  #gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
87
  #gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
 
89
  #gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)",
90
  # ),
91
  #],
92
+ #)
93
+
94
+ demo = gr.Interface(fn=advancedPromptPipeline, inputs="textbox", outputs="textbox")
95
 
96
 
97
  if __name__ == "__main__":