Update app_chat.py
app_chat.py CHANGED (+5 -3)
@@ -19,8 +19,10 @@ DESCRIPTION = """\
 
 model_id = "nvidia/Hymba-1.5B-Instruct"
 model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda", model_dtype="bfloat16", trust_remote_code=True)
-model.to(
-
+model = model.cuda().to(torch.bfloat16)
+
+#model.to('cuda')
+#model.eval()
 tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 tokenizer.chat_template = "{{'<extra_id_0>System'}}{% for message in messages %}{% if message['role'] == 'system' %}{{'\n' + message['content'].strip()}}{% if tools or contexts %}{{'\n'}}{% endif %}{% endif %}{% endfor %}{% if tools %}{% for tool in tools %}{{ '\n<tool> ' + tool|tojson + ' </tool>' }}{% endfor %}{% endif %}{% if contexts %}{% if tools %}{{'\n'}}{% endif %}{% for context in contexts %}{{ '\n<context> ' + context.strip() + ' </context>' }}{% endfor %}{% endif %}{{'\n\n'}}{% for message in messages %}{% if message['role'] == 'user' %}{{ '<extra_id_1>User\n' + message['content'].strip() + '\n' }}{% elif message['role'] == 'assistant' %}{{ '<extra_id_1>Assistant\n' + message['content'].strip() + '\n' }}{% elif message['role'] == 'tool' %}{{ '<extra_id_1>Tool\n' + message['content'].strip() + '\n' }}{% endif %}{% endfor %}{%- if add_generation_prompt %}{{'<extra_id_1>Assistant\n'}}{%- endif %}"
 #tokenizer.use_default_system_prompt = False
@@ -56,7 +58,7 @@ def generate(
     conversation += chat_history
     conversation.append({"role": "user", "content": message})
 
-    input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt").to('cuda')
+    input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt").to('cuda').to(torch.bfloat16)
 
     # stopping_criteria = StoppingCriteriaList([StopStringCriteria(tokenizer=tokenizer, stop_strings="</s>")])
 
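For reference, a minimal sketch of what the first hunk does (not the app's exact code): load the model with trust_remote_code and run it on CUDA in bfloat16. The sketch uses the standard transformers torch_dtype argument; the model_dtype kwarg in the diff is assumed to be consumed by Hymba's remote code, and model.eval() mirrors the commented-out line above.

# Sketch only: load Hymba-1.5B-Instruct on CUDA in bfloat16 (assumed standard
# transformers API, not the exact code in app_chat.py).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "nvidia/Hymba-1.5B-Instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="cuda",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
model.eval()  # inference mode, as in the commented-out model.eval() in the diff
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)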
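And a sketch of the prompt-preparation step touched by the second hunk, under the same assumptions. Note that apply_chat_template returns integer token IDs; in standard transformers models they are only moved to the device and kept as integers, since casting IDs to a floating dtype such as bfloat16 typically breaks the embedding lookup, so only the model weights are cast here. The conversation content is illustrative, not from the app.

# Sketch only: build input IDs with the custom chat template and generate.
conversation = [{"role": "user", "content": "Who are you?"}]
input_ids = tokenizer.apply_chat_template(
    conversation,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
).to("cuda")  # token IDs stay integer; only the model runs in bfloat16
output_ids = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))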