Update README.md
Browse files
README.md
CHANGED
@@ -69,7 +69,7 @@ messages = [
|
|
69 |
]
|
70 |
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
|
71 |
|
72 |
-
outputs = model.generate(tokenized_chat,
|
73 |
print(tokenizer.decode(outputs[0]))
|
74 |
```
|
75 |
|
@@ -85,7 +85,7 @@ messages = [
|
|
85 |
{"role": "user", "content": "Who are you?"},
|
86 |
]
|
87 |
pipe = pipeline("text-generation", model="nvidia/Mistral-NeMo-Minitron-8B-Instruct")
|
88 |
-
pipe(messages,
|
89 |
```
|
90 |
|
91 |
## Evaluation Results
|
|
|
69 |
]
|
70 |
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
|
71 |
|
72 |
+
outputs = model.generate(tokenized_chat, stop_strings=["<extra_id_1>"], tokenizer=tokenizer)
|
73 |
print(tokenizer.decode(outputs[0]))
|
74 |
```
|
75 |
|
|
|
85 |
{"role": "user", "content": "Who are you?"},
|
86 |
]
|
87 |
pipe = pipeline("text-generation", model="nvidia/Mistral-NeMo-Minitron-8B-Instruct")
|
88 |
+
pipe(messages, max_new_tokens=64, stop_strings=["<extra_id_1>"], tokenizer=pipe.tokenizer)
|
89 |
```
|
90 |
|
91 |
## Evaluation Results
|