Commit 9b58bb3 · 1 Parent(s): 295aa8f
flyingfishinwater committed

Update models.json

Files changed (1):
  1. models.json +15 -9
models.json CHANGED
@@ -4,7 +4,7 @@
     "model_title": "Llama2-1.3B",
     "model_file": "ggml-model-Q8_0.gguf",
     "model_url": "https://",
-    "model_info_url": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
+    "model_info_url": "https://huggingface.co/princeton-nlp/Sheared-LLaMA-1.3B",
     "model_avatar": "ava0_48",
     "model_description": "The standard Llama2 based 1.3B LLM.",
     "developer": "Meta",
@@ -17,13 +17,14 @@
     "model_inference" : "llama",
     "n_batch" : 10,
     "template_name" : "HumanBot",
-    "is_ready": true
+    "is_ready": true,
+    "is_internal": true
   },
   {
     "id": "tinyllama-1.1B-chat-Q8",
     "model_title": "TinyLlama-1.1B-chat",
     "model_file": "mistral-7b-instruct-v0.2.Q5_K_M.gguf",
-    "model_url": "https://huggingface.co/flyingfishinwater/goodmodels/blob/main/tinyllama-1.1B-chat-v1.0-Q8_0.gguf",
+    "model_url": "https://huggingface.co/flyingfishinwater/goodmodels/blob/main/tinyllama-1.1B-chat-v1.0-Q8_0.gguf?download=true",
     "model_info_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
     "model_avatar": "logo_tinyllama",
     "model_description": "The TinyLlama 1.1B model.",
@@ -37,13 +38,14 @@
     "model_inference" : "llama",
     "n_batch" : 10,
     "template_name" : "TinyLlama",
-    "is_ready": true
+    "is_ready": true,
+    "is_internal": false
   },
   {
     "id": "tinyllama-1.1B-32k-Q8",
     "model_title": "TinyLlama-1.1B-32k",
     "model_file": "mistral-7b-instruct-v0.2.Q5_K_M.gguf",
-    "model_url": "https://huggingface.co/flyingfishinwater/goodmodels/blob/main/tinyllama-1.1B-chat-v1.0-Q8_0.gguf",
+    "model_url": "https://huggingface.co/flyingfishinwater/goodmodels/blob/main/tinyllama-1.1B-chat-v1.0-Q8_0.gguf?download=true",
     "model_info_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
     "model_avatar": "logo_tinyllama",
     "model_description": "The TinyLlama 1.1B model.",
@@ -57,7 +59,8 @@
     "model_inference" : "llama",
     "n_batch" : 10,
     "template_name" : "TinyLlama",
-    "is_ready": false
+    "is_ready": false,
+    "is_internal": true
   },
   {
     "id": "mistral-7b-instruct-v0.2-Q5_K_M",
@@ -77,7 +80,8 @@
     "model_inference" : "llama",
     "n_batch" : 10,
     "template_name" : "Mistral",
-    "is_ready": true
+    "is_ready": true,
+    "is_internal": false
   },
   {
     "id": "mistral-7b-instruct-v0.2-Q8",
@@ -97,7 +101,8 @@
     "model_inference" : "llama",
     "n_batch" : 10,
     "template_name" : "Mistral",
-    "is_ready": true
+    "is_ready": true,
+    "is_internal": false
   },
   {
     "id": "openchat-3.5-1210-Q5_K_M",
@@ -117,6 +122,7 @@
     "model_inference" : "llama",
     "n_batch" : 10,
     "template_name" : "Mistral",
-    "is_ready": true
+    "is_ready": true,
+    "is_internal": false
   }
 ]
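For reference, here is a minimal sketch of how a client could consume the flags this commit introduces. It assumes models.json is the top-level JSON array shown in the diff and that only entries which are ready and not internal should be listed; the field names (`is_ready`, `is_internal`, `model_title`, `model_url`, `model_file`) come from the diff, while the loader itself is hypothetical and not part of this repository.

```python
import json

def load_public_models(path="models.json"):
    """Return entries a public build would list.

    Hypothetical helper: the field names come from models.json as changed
    in this commit; the filtering policy (ready and not internal) is an
    assumption about how the flags are meant to be used.
    """
    with open(path, "r", encoding="utf-8") as f:
        models = json.load(f)  # top-level array of model entries
    return [
        m for m in models
        if m.get("is_ready", False) and not m.get("is_internal", False)
    ]

if __name__ == "__main__":
    for m in load_public_models():
        # model_url now carries a ?download=true query parameter (added in
        # this commit), presumably so the app can fetch the file directly
        # instead of landing on the file's web page.
        print(f'{m["model_title"]}: {m["model_url"]}')
```

With the diff above, this would print the TinyLlama, Mistral and OpenChat entries but skip the Llama2-1.3B and TinyLlama-32k entries, whose `is_internal` flag is set to true (the latter is also marked not ready).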