Upload README.md
README.md CHANGED
@@ -8,15 +8,8 @@ license: apache-2.0
 model_creator: MonsterAPI
 model_name: Mistral 7B Norobots
 model_type: mistral
-prompt_template: '<|im_start|>system
-
-  {system_message}<|im_end|>
-
-  <|im_start|>user
-
-  {prompt}<|im_end|>
-
-  <|im_start|>assistant
+prompt_template: '<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}}
+  </s>
 
   '
 quantized_by: TheBloke
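For reference, the folded single-quoted scalar on the `+` lines parses back into a one-line template. A quick check with PyYAML (assuming `pyyaml` is installed; the variable name is illustrative):

```python
import yaml

# Front-matter fragment as it reads after this commit (assuming the 2-space
# continuation indent YAML requires for a multi-line quoted scalar).
front_matter = (
    "prompt_template: '<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}}\n"
    "  </s>\n"
    "\n"
    "  '\n"
)

# YAML folds the single line break into a space and keeps the blank line
# as a newline, so the loaded value is the one-line NoRobots template.
print(repr(yaml.safe_load(front_matter)["prompt_template"]))
# '<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}} </s>\n'
```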
@@ -80,14 +73,10 @@ It is supported by:
 <!-- repositories-available end -->
 
 <!-- prompt-template start -->
-## Prompt template: ChatML
+## Prompt template: NoRobots
 
 ```
-<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}} </s>
 
 ```
 
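To make the new template concrete: a minimal sketch of filling the `{prompt}` slot (not part of the README; the `{{response}} </s>` tail is left off here since that span reads as what the model is expected to generate):

```python
# Plain string, so "{prompt}" stays a placeholder until .format() is called.
prompt_template = "<|system|> </s> <|user|> {prompt} </s> <|assistant|>"

prompt = "Tell me about AI"
formatted_prompt = prompt_template.format(prompt=prompt)
print(formatted_prompt)
# <|system|> </s> <|user|> Tell me about AI </s> <|assistant|>
```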
@@ -153,11 +142,7 @@ prompts = [
     "What is 291 - 150?",
     "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
 ]
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}} </s>
 '''
 
 prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]
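One caveat on the `+` line as it reads: because the template is an f-string, Python interpolates `{prompt}` at assignment time (a `NameError` in this snippet, where `prompt` is not yet bound) and collapses `{{response}}` to `{response}`, which the later `.format(prompt=prompt)` call would then reject with a `KeyError`. A plain string sidesteps both. A sketch against vLLM's public API, where the repo id and sampling values are assumptions, not from the diff:

```python
from vllm import LLM, SamplingParams

# Plain (non-f) string so "{prompt}" survives until .format() below.
prompt_template = "<|system|> </s> <|user|> {prompt} </s> <|assistant|>"

prompts = [
    "What is 291 - 150?",
    "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
]
prompts = [prompt_template.format(prompt=p) for p in prompts]

sampling_params = SamplingParams(temperature=0.7, max_tokens=256)  # illustrative values
llm = LLM(model="TheBloke/mistral_7b_norobots-AWQ", quantization="awq")  # assumed repo id

for output in llm.generate(prompts, sampling_params):
    print(output.outputs[0].text)
```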
@@ -199,11 +184,7 @@ from huggingface_hub import InferenceClient
 endpoint_url = "https://your-endpoint-url-here"
 
 prompt = "Tell me about AI"
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}} </s>
 '''
 
 client = InferenceClient(endpoint_url)
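Here `prompt` is bound before the f-string, so the template interpolates cleanly. A minimal sketch of sending it to a TGI endpoint with `InferenceClient.text_generation` (the generation parameters are illustrative, and the `{{response}} </s>` tail is omitted so the model produces the response):

```python
from huggingface_hub import InferenceClient

endpoint_url = "https://your-endpoint-url-here"
client = InferenceClient(endpoint_url)

prompt = "Tell me about AI"
# f-string is fine here: prompt is already defined when it is evaluated.
prompt_template = f"<|system|> </s> <|user|> {prompt} </s> <|assistant|>"

response = client.text_generation(
    prompt_template,
    max_new_tokens=256,  # illustrative
    temperature=0.7,     # illustrative
)
print(response)
```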
@@ -266,11 +247,7 @@ model = AutoModelForCausalLM.from_pretrained(
 streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
 
 prompt = "Tell me about AI"
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}} </s>
 '''
 
 # Convert prompt to tokens
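For the Transformers path, a minimal end-to-end sketch around the streamed generation this hunk touches (the repo id and generation settings are assumptions, not from the diff):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

model_id = "TheBloke/mistral_7b_norobots-GPTQ"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

prompt = "Tell me about AI"
prompt_template = f"<|system|> </s> <|user|> {prompt} </s> <|assistant|>"

# Convert prompt to tokens and stream the completion token by token
input_ids = tokenizer(prompt_template, return_tensors="pt").input_ids.to(model.device)
model.generate(
    input_ids,
    streamer=streamer,
    max_new_tokens=512,  # illustrative
    do_sample=True,
    temperature=0.7,
)
```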