Adam-Ben-Khalifa committed
Commit 69395bf
Parent(s): 6fefe56
Update app.py
app.py CHANGED
@@ -29,49 +29,55 @@ async def advancedPromptPipeline(InputPrompt):
     start_time = time.time()
     advanced_prompt = await enhancer.enhance_prompt(input_prompt, perform_eval=False)
     elapsed_time = time.time() - start_time
-
-    return {
-        "model": model,
-        "elapsed_time": elapsed_time,
-        "prompt_tokens": enhancer.prompt_tokens,
-        "completion_tokens": enhancer.completion_tokens,
-        "approximate_cost": (enhancer.prompt_tokens*i_cost)+(enhancer.completion_tokens*o_cost),
-        "inout_prompt": input_prompt,
-        "advanced_prompt": advanced_prompt["advanced_prompt"],
+
+
+    yield advanced_prompt["advanced_prompt"]
+    #return {
+        #"model": model,
+        #"elapsed_time": elapsed_time,
+        #"prompt_tokens": enhancer.prompt_tokens,
+        #"completion_tokens": enhancer.completion_tokens,
+        #"approximate_cost": (enhancer.prompt_tokens*i_cost)+(enhancer.completion_tokens*o_cost),
+        #"inout_prompt": input_prompt,
+        #"advanced_prompt": advanced_prompt["advanced_prompt"],
     }
 
 
 def respond(
     message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
+    #history: list[tuple[str, str]],
+    #system_message,
+    #max_tokens,
+    #temperature,
+    #top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
+    #messages = [{"role": "system", "content": system_message}]
 
-    messages.append({"role": "user", "content": message})
+    #for val in history:
+    #    if val[0]:
+    #        messages.append({"role": "user", "content": val[0]})
+    #    if val[1]:
+    #        messages.append({"role": "assistant", "content": val[1]})
+    #
+    #messages.append({"role": "user", "content": message})
 
+    messages = []
+
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+    advancedPromptPipeline(InputPrompt)
+
+    #for message in client.chat_completion(
+    #    messages,
+    #    max_tokens=max_tokens,
+    #    stream=True,
+    #    temperature=temperature,
+    #    top_p=top_p,
+    #):
+    #    token = message.choices[0].delta.content
+
+    #    response += token
+    #    yield response
 
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
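
As committed, respond calls advancedPromptPipeline(InputPrompt) without awaiting or iterating it, so the enhanced prompt does not reach the chat UI through that call, and the pipeline's signature takes InputPrompt while its body reads input_prompt. The sketch below is hypothetical and not part of this commit: it shows one way the new yield-based pipeline could be hooked into gr.ChatInterface for streaming, assuming the parameter naming inside advancedPromptPipeline is reconciled and that enhancer is the prompt-enhancer object defined earlier in app.py.

import gradio as gr

async def respond(message, history):
    # gr.ChatInterface supports async-generator handlers: each yielded string
    # replaces the in-progress assistant reply in the chat window.
    # advancedPromptPipeline is the async generator from this commit
    # (it now yields advanced_prompt["advanced_prompt"] instead of returning a dict).
    async for chunk in advancedPromptPipeline(message):
        yield chunk

demo = gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()

Because advancedPromptPipeline now contains a yield, it is an async generator; consuming it with async for (rather than calling or awaiting it) is what actually produces the enhanced prompt for the interface.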