urial chat initial version
app.py CHANGED
@@ -1,62 +1,143 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
+import gradio as gr
+from openai import OpenAI
+import os
+from typing import List
+import logging
+
+# add logging info to console
+logging.basicConfig(level=logging.INFO)
+
+
+BASE_URL = "https://api.together.xyz/v1"
+DEFAULT_API_KEY = os.getenv("TOGETHER_API_KEY")
+import urllib.request
+URIAL_VERSION = "inst_1k_v4.help"
+
+urial_url = f"https://raw.githubusercontent.com/Re-Align/URIAL/main/urial_prompts/{URIAL_VERSION}.txt"
+urial_prompt = urllib.request.urlopen(urial_url).read().decode('utf-8')
+urial_prompt = urial_prompt.replace("```", '"""')
+stop_str = ['"""', '# Query:', '# Answer:']
+
+def urial_template(urial_prompt, history, message):
+    current_prompt = urial_prompt + "\n"
+    for user_msg, ai_msg in history:
+        current_prompt += f'# Query:\n"""\n{user_msg}\n"""\n\n# Answer:\n"""\n{ai_msg}\n"""\n\n'
+    current_prompt += f'# Query:\n"""\n{message}\n"""\n\n# Answer:\n"""\n'
+    return current_prompt
+
+
+
+
+def openai_base_request(
+    model: str=None,
+    temperature: float=0,
+    max_tokens: int=512,
+    top_p: float=1.0,
+    prompt: str=None,
+    n: int=1,
+    repetition_penalty: float=1.0,
+    stop: List[str]=None,
+    api_key: str=None,
+):
+    if api_key is None:
+        api_key = DEFAULT_API_KEY
+    client = OpenAI(api_key=api_key, base_url=BASE_URL)
+    # print(f"Requesting chat completion from OpenAI API with model {model}")
+    logging.info(f"Requesting chat completion from OpenAI API with model {model}")
+    logging.info(f"Prompt: {prompt}")
+    logging.info(f"Temperature: {temperature}")
+    logging.info(f"Max tokens: {max_tokens}")
+    logging.info(f"Top-p: {top_p}")
+    logging.info(f"Repetition penalty: {repetition_penalty}")
+    logging.info(f"Stop: {stop}")
+
+    request = client.completions.create(
+        model=model,
+        prompt=prompt,
+        temperature=float(temperature),
+        max_tokens=int(max_tokens),
+        top_p=float(top_p),
+        n=n,
+        extra_body={'repetition_penalty': float(repetition_penalty)},
+        stop=stop,
+        stream=True
+    )
+
+    return request
+
+
+
+
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    max_tokens,
+    temperature,
+    top_p,
+    rp,
+    model_name,
+    together_api_key
+):
+    global stop_str, urial_prompt
+    rp = 1.0
+    prompt = urial_template(urial_prompt, history, message)
+    if model_name == "Llama-3-8B":
+        _model_name = "meta-llama/Llama-3-8b-hf"
+    elif model_name == "Llama-3-70B":
+        _model_name = "meta-llama/Llama-3-70b-hf"
+    else:
+        raise ValueError("Invalid model name")
+    # _model_name = "meta-llama/Llama-3-8b-hf"
+
+    if together_api_key and len(together_api_key) == 64:
+        api_key = together_api_key
+    else:
+        api_key = DEFAULT_API_KEY
+
+    request = openai_base_request(prompt=prompt, model=_model_name,
+                                  temperature=temperature,
+                                  max_tokens=max_tokens,
+                                  top_p=top_p,
+                                  repetition_penalty=rp,
+                                  stop=stop_str, api_key=api_key)
+
+    response = ""
+    for msg in request:
+        # print(msg.choices[0].delta.keys())
+        token = msg.choices[0].delta["content"]
+        response += token
+        should_stop = False
+        for _stop in stop_str:
+            if _stop in response:
+                should_stop = True
+                break
+        if should_stop:
+            break
+        yield response
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column():
+            gr.Label("Welcome to the URIAL Chatbot!")
+            model_name = gr.Radio(["Llama-3-8B", "Llama-3-70B"], value="Llama-3-8B", label="Base model name")
+            together_api_key = gr.Textbox(label="Together API Key", placeholder="Enter your Together API Key. Leave it blank if you want to use the default API key.", type="password")
+        with gr.Column():
+            with gr.Column():
+                with gr.Row():
+                    max_tokens = gr.Textbox(value=1024, label="Max tokens")
+                    temperature = gr.Textbox(value=0.5, label="Temperature")
+            with gr.Column():
+                with gr.Row():
+                    top_p = gr.Textbox(value=0.9, label="Top-p")
+                    rp = gr.Textbox(value=1.1, label="Repetition penalty")
+
+    chat = gr.ChatInterface(
+        respond,
+        additional_inputs=[max_tokens, temperature, top_p, rp, model_name, together_api_key],
+        # additional_inputs_accordion="⚙️ Parameters",
+        # fill_height=True,
+    )
+    chat.chatbot.height = 600
+
+
+if __name__ == "__main__":
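
Note on the prompt format: URIAL is a tuning-free, in-context alignment method, so the new app prepends a fixed alignment prompt (downloaded from the Re-Align/URIAL repo) and wraps every turn in a # Query: / # Answer: template, letting a base (not instruction-tuned) Llama-3 model simply continue the text. A minimal sketch of what urial_template produces, using a hypothetical one-turn history and a placeholder for the downloaded inst_1k_v4.help prefix:

# Sketch of the prompt urial_template() builds; history/query are hypothetical.
history = [("What is 2+2?", "The answer is 4.")]  # one prior (user, assistant) turn
message = "And 3+3?"                              # current user query
urial_prefix = "<contents of inst_1k_v4.help>"    # placeholder for the fetched prompt

prompt = urial_prefix + "\n"
for user_msg, ai_msg in history:
    prompt += f'# Query:\n"""\n{user_msg}\n"""\n\n# Answer:\n"""\n{ai_msg}\n"""\n\n'
prompt += f'# Query:\n"""\n{message}\n"""\n\n# Answer:\n"""\n'

# The base model continues after the final opening `"""`; generation is cut at
# any of the stop strings defined in app.py: '"""', '# Query:', '# Answer:'.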
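
The streaming path can also be exercised without the Gradio UI. Below is a minimal, hypothetical driver (not part of app.py), assuming TOGETHER_API_KEY is set and reusing the helpers defined above. The delta["content"] access mirrors how respond() reads Together's streamed completion chunks; a vanilla OpenAI completions stream would expose the token as choices[0].text instead. The in-loop stop check is a belt-and-braces guard on top of the API-level stop= parameter, mirroring respond():

# Hypothetical standalone usage of openai_base_request()/urial_template().
import os
assert os.getenv("TOGETHER_API_KEY"), "requires a Together API key"

stream = openai_base_request(
    prompt=urial_template(urial_prompt, [], "Who are you?"),
    model="meta-llama/Llama-3-8b-hf",
    temperature=0.5,
    max_tokens=256,
    top_p=0.9,
    repetition_penalty=1.1,
    stop=stop_str,
)

response = ""
for msg in stream:
    token = msg.choices[0].delta["content"]  # as read in app.py's respond()
    response += token
    if any(s in response for s in stop_str):  # client-side stop-string guard
        break
print(response)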