Update app.py
app.py CHANGED
@@ -1,21 +1,14 @@
 import spaces
 import gradio as gr
-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
-from threading import Thread
+from huggingface_hub import hf_hub_download
+from llama_cpp import Llama
+from llama_cpp_agent import LlamaCppAgent
+from llama_cpp_agent import MessagesFormatterType
+from llama_cpp_agent.providers import LlamaCppPythonProvider
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-# client = InferenceClient("cognitivecomputations/dolphin-2.8-mistral-7b-v02")
+subprocess.run('pip install llama-cpp-python', env={'CMAKE_ARGS': "-DLLAMA_CUBLAS=ON", 'FORCE_CMAKE': '1'}, shell=True)
 
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
+hf_hub_download(repo_id="TheBloke/Mistral-7B-Instruct-v0.2-GGUF", filename="mistral-7b-instruct-v0.2.Q6_K.gguf")
 
 @spaces.GPU
 def respond(
@@ -26,50 +19,30 @@ def respond(
     temperature,
     top_p,
 ):
-
+    llama_model = Llama(r"mistral-7b-instruct-v0.2.Q6_K.gguf", n_batch=1024, n_threads=0, n_gpu_layers=33, n_ctx=8192, verbose=False)
 
-    tokenizer = AutoTokenizer.from_pretrained(
-        "Weyaxi/Einstein-v6.1-Llama3-8B",
-        trust_remote_code=True
-    )
-    model = AutoModelForCausalLM.from_pretrained(
-        "Weyaxi/Einstein-v6.1-Llama3-8B",
-        torch_dtype="auto",
-        load_in_4bit=True,
-        trust_remote_code=True
-    )
-    history_transformer_format = history + [[message, ""]]
+    provider = LlamaCppPythonProvider(llama_model)
 
-    messages = "".join(["".join(["\n<human>:" + item[0], "\n<bot>:" + item[1]]) for item in history_transformer_format])
-    input_ids = tokenizer([messages], return_tensors="pt").to("cuda")
-    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
-
-    generate_kwargs = dict(
-        input_ids,
-        streamer=streamer,
-        max_new_tokens=max_tokens,
-        do_sample=True,
-        top_p=top_p,
-        top_k=50,
-        temperature=temperature,
-        num_beams=1
+    agent = LlamaCppAgent(
+        provider,
+        system_prompt=f"{system_message}",
+        predefined_messages_formatter_type=MessagesFormatterType.MISTRAL,
+        debug_output=True
     )
-    t = Thread(target=model.generate, kwargs=generate_kwargs)
-    t.start()
-    partial_message = ""
-    for new_token in streamer:
-        partial_message += new_token
-        if '<|im_end|>' in partial_message:
-            break
-        yield partial_message
 
-
-
-
+    settings = provider.get_provider_default_settings()
+    settings.stream = True
+    settings.max_tokens = max_tokens
+    settings.temperature = temperature
+    settings.top_p = top_p
+
+    agent_output = agent.get_chat_response(message, llm_sampling_settings=settings)
+    yield agent_output.strip()
+
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a
+        gr.Textbox(value="You are a helpful assistant.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
@@ -80,11 +53,7 @@ demo = gr.ChatInterface(
             label="Top-p (nucleus sampling)",
         ),
     ],
-    theme=gr.themes.Soft(primary_hue="green", secondary_hue="indigo", neutral_hue="zinc", font=[gr.themes.GoogleFont("Exo 2"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
-        block_background_fill_dark="*neutral_800"
-    )
 )
 
-
 if __name__ == "__main__":
     demo.launch()
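Two details of the new body are worth flagging. It calls subprocess.run without ever importing subprocess, and although it sets settings.stream = True it collects the whole reply and yields it once, so the chat window only updates when generation finishes. Below is a minimal streaming sketch, not the committed code: it assumes llama-cpp-agent's BasicChatHistory and Roles types and the returns_streaming_generator and print_output arguments of get_chat_response (from that library's chat-history API, not from this commit), and it replays gradio's history into the agent, which the committed respond ignores.

import subprocess

# The commit calls subprocess.run without importing subprocess; the import
# above is required for the install step to run at all.
subprocess.run(
    'pip install llama-cpp-python',
    env={'CMAKE_ARGS': '-DLLAMA_CUBLAS=ON', 'FORCE_CMAKE': '1'},
    shell=True,
)

import spaces
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
# Assumed llama-cpp-agent chat-history types, not part of this commit;
# used below to replay gradio's history into the agent.
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles

hf_hub_download(repo_id="TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
                filename="mistral-7b-instruct-v0.2.Q6_K.gguf")


@spaces.GPU
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Loading inside the function keeps the GPU work within the window
    # that @spaces.GPU grants, at the cost of reloading per request.
    llama_model = Llama("mistral-7b-instruct-v0.2.Q6_K.gguf", n_batch=1024,
                        n_threads=0, n_gpu_layers=33, n_ctx=8192, verbose=False)
    provider = LlamaCppPythonProvider(llama_model)
    agent = LlamaCppAgent(
        provider,
        system_prompt=system_message,
        predefined_messages_formatter_type=MessagesFormatterType.MISTRAL,
    )

    # Replay prior turns; the committed respond drops `history` entirely.
    chat_history = BasicChatHistory()
    for user_turn, bot_turn in history:
        chat_history.add_message({"role": Roles.user, "content": user_turn})
        chat_history.add_message({"role": Roles.assistant, "content": bot_turn})

    settings = provider.get_provider_default_settings()
    settings.stream = True
    settings.max_tokens = max_tokens
    settings.temperature = temperature
    settings.top_p = top_p

    # Yield the growing partial text as tokens arrive so ChatInterface
    # streams the reply instead of showing it only at the end.
    partial = ""
    for token in agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=chat_history,
        returns_streaming_generator=True,
        print_output=False,
    ):
        partial += token
        yield partial

Constructing the Llama object per call is what makes the @spaces.GPU decorator sufficient on ZeroGPU hardware, since a device is only attached for the duration of the decorated call, and n_gpu_layers=33 is enough to offload all 32 transformer blocks of a 7B Mistral model plus the output layer.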