Update app.py
app.py CHANGED
@@ -10,46 +10,15 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
-MODEL_ID = "nikravan/
+MODEL_ID = "nikravan/Marco-o1-q4"
 CHAT_TEMPLATE = "ChatML"
 MODEL_NAME = MODEL_ID.split("/")[-1]
 CONTEXT_LENGTH = 16000
 
-
-COLOR = "blue"
+COLOR = "black"  # changed the color to black
 EMOJI = "🤖"
 DESCRIPTION = f"This is the {MODEL_NAME} model designed for testing thinking for general AI tasks."
 
-latex_delimiters_set = [{
-    "left": "\\(",
-    "right": "\\)",
-    "display": False
-}, {
-    "left": "\\begin{equation}",
-    "right": "\\end{equation}",
-    "display": True
-}, {
-    "left": "\\begin{align}",
-    "right": "\\end{align}",
-    "display": True
-}, {
-    "left": "\\begin{alignat}",
-    "right": "\\end{alignat}",
-    "display": True
-}, {
-    "left": "\\begin{gather}",
-    "right": "\\end{gather}",
-    "display": True
-}, {
-    "left": "\\begin{CD}",
-    "right": "\\end{CD}",
-    "display": True
-}, {
-    "left": "\\[",
-    "right": "\\]",
-    "display": True
-}]
-
 
 @spaces.GPU()
 def predict(message, history, system_prompt, temperature, max_new_tokens, top_k, repetition_penalty, top_p):
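The deleted latex_delimiters_set is the list-of-dicts format that Gradio's Chatbot component accepts for LaTeX rendering: each entry pairs an opening and closing delimiter with a flag for inline versus display math. The diff never shows where the list was consumed, so the following is a minimal sketch of the usual wiring, assuming the chatbot is handed to gr.ChatInterface:

    import gradio as gr

    # Sketch (assumption): how a delimiter list like the removed one is
    # typically consumed. "display": False marks inline math, True display math.
    latex_delimiters_set = [
        {"left": "\\(", "right": "\\)", "display": False},
        {"left": "\\[", "right": "\\]", "display": True},
    ]

    chatbot = gr.Chatbot(latex_delimiters=latex_delimiters_set)
    # gr.ChatInterface(predict, chatbot=chatbot, ...) would then render LaTeX
    # in replies using these delimiters instead of Gradio's defaults.

With the list removed, the app falls back to whatever delimiters the installed Gradio version uses by default.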
@@ -73,7 +42,6 @@ def predict(message, history, system_prompt, temperature, max_new_tokens, top_k, repetition_penalty, top_p):
         instruction += f' {message} [/INST]'
     else:
         raise Exception("Incorrect chat template, select 'Auto', 'ChatML' or 'Mistral Instruct'")
-    print(instruction)
 
     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
     enc = tokenizer(instruction, return_tensors="pt", padding=True, truncation=True)
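The generate call itself sits outside this hunk. With TextIteratorStreamer, the standard pattern is to run model.generate on a worker thread and iterate over the streamer inside the request handler; a sketch, assuming model and device are defined elsewhere in app.py and reusing enc, streamer, and the sampling parameters from predict's signature:

    from threading import Thread

    generate_kwargs = dict(
        input_ids=enc.input_ids.to(device),
        attention_mask=enc.attention_mask.to(device),
        streamer=streamer,
        do_sample=True,
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_k=top_k,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )
    # generate() blocks until completion, so it runs on a background thread
    # while the streamer hands decoded tokens back to this generator
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    outputs = []
    for new_token in streamer:
        outputs.append(new_token)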
@@ -102,7 +70,8 @@ def predict(message, history, system_prompt, temperature, max_new_tokens, top_k, repetition_penalty, top_p):
         if new_token in stop_tokens:
             break
     result = "".join(outputs)
-
+    # switch the output format to Markdown and LaTeX
+    yield f"### $$ {result} $$"
 
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
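The new yield wraps the entire reply as a Markdown heading containing one display-math block; recent Gradio versions treat $$ ... $$ as display math by default, so the whole answer gets typeset by the math renderer. That suits purely mathematical replies, but ordinary prose containing $ signs, backslashes, or braces can render oddly. A hypothetical, more conservative variant (not in the commit) would switch to math mode only when the reply looks like LaTeX:

    # Hypothetical guard (assumption, not part of this commit): use display
    # math only for LaTeX-looking replies, plain Markdown otherwise.
    if "\\begin{" in result or "\\[" in result:
        yield f"$$ {result} $$"
    else:
        yield result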
@@ -133,3 +102,4 @@ gr.ChatInterface(
     ],
     theme=gr.themes.Soft(primary_hue=COLOR),
 ).queue().launch()
+
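The hunks show only fragments of the surrounding scaffolding: the BitsAndBytesConfig import, the device line, and the tail of a gr.ChatInterface call whose additional_inputs list feeds predict. A sketch of what that scaffolding plausibly looks like; the quantization settings, slider ranges, and defaults are illustrative assumptions, and only the parameter names come from predict's signature in the diff:

    # Sketch (assumptions: exact ranges and defaults are not in the diff)
    quantization_config = BitsAndBytesConfig(load_in_4bit=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        device_map="auto",
        quantization_config=quantization_config,
        attn_implementation="flash_attention_2",  # enabled by the flash-attn install above
    )

    gr.ChatInterface(
        predict,
        title=f"{EMOJI} {MODEL_NAME}",
        description=DESCRIPTION,
        additional_inputs=[
            gr.Textbox("You are a helpful assistant.", label="System prompt"),
            gr.Slider(0.0, 1.0, 0.7, label="Temperature"),
            gr.Slider(128, 4096, 1024, label="Max new tokens"),
            gr.Slider(1, 80, 40, label="Top K"),
            gr.Slider(0.1, 2.0, 1.1, label="Repetition penalty"),
            gr.Slider(0.0, 1.0, 0.95, label="Top P"),
        ],
        theme=gr.themes.Soft(primary_hue=COLOR),
    ).queue().launch()

One caveat on the color change: gr.themes.Soft(primary_hue=...) expects a name from Gradio's Tailwind-style palette (gray, neutral, slate, blue, and so on), which has no "black" entry, so COLOR = "black" may raise an error at startup; "neutral" or "gray" would be the closest valid hues.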