Update config.json
Browse files — config.json (+3 −3)
config.json
CHANGED
@@ -4,13 +4,13 @@
|
|
4 |
"quantization": "4Q_K_M GGUF",
|
5 |
"model_base": "microsoft/Phi-3-mini-4k-instruct",
|
6 |
"model_type": "phi3",
|
7 |
-
"tokenizer": "llmware/slim-qa-gen-phi-3
|
8 |
"tokenizer_local": "tokenizer_phi3.json",
|
9 |
"parameters": "3.8 billion",
|
10 |
"description": "slim-qa-gen-phi-3 is a function-calling model, fine-tuned to output structured dictionaries",
|
11 |
"prompt_wrapper": "human_bot",
|
12 |
-
"prompt_format": "<human> {context_passage} <generate> {
|
13 |
-
"output_format": "{'
|
14 |
"primary_keys": ["question, answer", "boolean", "multiple choice"],
|
15 |
"output_values": ["question", "answer"],
|
16 |
"publisher": "llmware",
|
|
|
4 |
"quantization": "4Q_K_M GGUF",
|
5 |
"model_base": "microsoft/Phi-3-mini-4k-instruct",
|
6 |
"model_type": "phi3",
|
7 |
+
"tokenizer": "llmware/slim-qa-gen-phi-3",
|
8 |
"tokenizer_local": "tokenizer_phi3.json",
|
9 |
"parameters": "3.8 billion",
|
10 |
"description": "slim-qa-gen-phi-3 is a function-calling model, fine-tuned to output structured dictionaries",
|
11 |
"prompt_wrapper": "human_bot",
|
12 |
+
"prompt_format": "<human> {context_passage} <generate> { one of primary_key } </generate>\n<bot>:",
|
13 |
+
"output_format": "{'question': ['custom generated question from passage'], 'answer': ['answer to question'] }",
|
14 |
"primary_keys": ["question, answer", "boolean", "multiple choice"],
|
15 |
"output_values": ["question", "answer"],
|
16 |
"publisher": "llmware",
|