[
    {
        "id": "Internal",
        "model_title": "Llama2 Lite",
        "model_file": "ggml-model-Q8_0.gguf",
        "model_url": "https://",
        "model_info_url": "https://huggingface.co/princeton-nlp/Sheared-LLaMA-1.3B",
        "model_avatar": "ava0",
        "model_description": "The standard Llama2 based 1.3B LLM.",
        "developer": "Meta",
        "developer_url": "https://ai.meta.com/llama/",
        "file_size": 1430,
        "context" : 2048,
        "temp" : 0.6,
        "prompt_format" : "<human>: {{prompt}}\n<bot>: ",
        "top_k" : 5,
        "top_p" : 0.9,
        "model_inference" : "llama",
        "n_batch" : 10,
        "template_name" : "HumanBot",
        "is_ready": true,
        "is_internal": true
    },
    {
         "id": "LiteLlama-460M-1T-Q8",
         "model_title": "LiteLlama-460M-1T",
         "model_file": "LiteLlama-460M-1T-Q8_0.gguf",
         "model_url": "https://huggingface.co/flyingfishinwater/goodmodels/resolve/main/LiteLlama-460M-1T-Q8_0.gguf?download=true",
         "model_info_url": "https://huggingface.co/ahxt/LiteLlama-460M-1T",
         "model_avatar": "logo_litellama",
         "model_description": "We present an open-source reproduction of Meta AI's LLaMa 2. However, with significantly reduced model sizes, LiteLlama-460M-1T has 460M parameters trained with 1T tokens.",
         "developer": "Xiaotian Han from Texas A&M University",
         "developer_url": "https://huggingface.co/ahxt/LiteLlama-460M-1T",
         "file_size": 493,
         "context" : 4096,
         "temp" : 0.6,
         "prompt_format" : "<|system|>You are a friendly chatbot who always responds in the style of a pirate.</s><|user|>{{prompt}}</s><|assistant|>",
         "top_k" : 5,
         "top_p" : 0.9,
         "model_inference" : "llama",
         "n_batch" : 10,
         "template_name" : "TinyLlama",
         "is_ready": true,
         "is_internal": false
    },
    {
         "id": "tinyllama-1.1B-chat-Q8",
         "model_title": "TinyLlama-1.1B-chat",
         "model_file": "tinyllama-1.1B-chat-v1.0-Q8_0.gguf",
         "model_url": "https://huggingface.co/flyingfishinwater/goodmodels/resolve/main/tinyllama-1.1B-chat-v1.0-Q8_0.gguf?download=true",
         "model_info_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
         "model_avatar": "logo_tinyllama",
         "model_description": "The TinyLlama project aims to pretrain a 1.1B Llama model on 3 trillion tokens. With some proper optimization, we can achieve this within a span of just 90 days using 16 A100-40G GPUs. The training has started on 2023-09-01.",
         "developer": "Zhang Peiyuan",
         "developer_url": "https://github.com/jzhang38/TinyLlama",
         "file_size": 1170,
         "context" : 4096,
         "temp" : 0.6,
         "prompt_format" : "<|system|>You are a friendly chatbot who always responds in the style of a pirate.</s><|user|>{{prompt}}</s><|assistant|>",
         "top_k" : 5,
         "top_p" : 0.9,
         "model_inference" : "llama",
         "n_batch" : 10,
         "template_name" : "TinyLlama",
         "is_ready": true,
         "is_internal": false
    },
    {
        "id": "mistral-7b-instruct-v0.2-Q8",
        "model_title": "Mistral 7B v0.2",
        "model_file": "mistral-7b-instruct-v0.2.Q8_0.gguf",
        "model_url": "https://huggingface.co/flyingfishinwater/goodmodels/resolve/main/mistral-7b-instruct-v0.2.Q8_0.gguf?download=true",
        "model_info_url": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
        "model_avatar": "logo_mistralai",
        "model_description": "The Mistral-7B-v0.2 Large Language Model (LLM) is a pretrained generative text model with 7 billion parameters. Mistral-7B-v0.2 outperforms Llama 2 13B on all benchmarks we tested.",
        "developer": "Mistral AI",
        "developer_url": "https://mistral.ai/",
        "file_size": 7695,
        "context" : 4096,
        "temp" : 0.6,
        "prompt_format" : "<s>[INST]{{prompt}}[/INST]</s>",
        "top_k" : 5,
        "top_p" : 0.9,
        "model_inference" : "llama",
        "n_batch" : 10,
        "template_name" : "Mistral",
        "is_ready": true,
        "is_internal": false
   },
   {
        "id": "openchat-3.5-1210-Q8",
        "model_title": "OpenChat 3.5",
        "model_file": "mistral-7b-instruct-v0.2.Q8.gguf",
        "model_url": "https://huggingface.co/flyingfishinwater/goodmodels/resolve/main/openchat-3.5-1210.Q8_0.gguf?download=true",
        "model_info_url": "https://huggingface.co/openchat/openchat_3.5",
        "model_avatar": "logo_openchat",
        "model_description": "OpenChat is an innovative library of open-source language models, fine-tuned with C-RLFT - a strategy inspired by offline reinforcement learning. Our models learn from mixed-quality data without preference labels, delivering exceptional performance on par with ChatGPT, even with a 7B model. Despite our simple approach, we are committed to developing a high-performance, commercially viable, open-source large language model, and we continue to make significant strides toward this vision.",
        "developer": "OpenChat Team",
        "developer_url": "https://openchat.team/",
        "file_size": 7695,
        "context" : 4096,
        "temp" : 0.6,
        "prompt_format" : "<s>[INST]{{prompt}}[/INST]</s>",
        "top_k" : 5,
        "top_p" : 0.9,
        "model_inference" : "llama",
        "n_batch" : 10,
        "template_name" : "Mistral",
        "is_ready": true,
        "is_internal": false
   },
   {
        "id": "phi-2",
        "model_title": "Phi-2",
        "model_file": "phi-2.Q8_0.gguf",
        "model_url": "https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q8_0.gguf?download=true",
        "model_info_url": "https://huggingface.co/microsoft/phi-2",
        "model_avatar": "logo_phi",
        "model_description": "Phi-2 is a Transformer with 2.7 billion parameters. It was trained using the same data sources as Phi-1.5, augmented with a new data source that consists of various NLP synthetic texts and filtered websites (for safety and educational value). When assessed against benchmarks testing common sense, language understanding, and logical reasoning, Phi-2 showcased a nearly state-of-the-art performance among models with less than 13 billion parameters.",
        "developer": "Microsoft",
        "developer_url": "https://huggingface.co/microsoft/phi-2",
        "file_size": 2960,
        "context" : 4096,
        "temp" : 0.6,
        "prompt_format" : "Instruct: {{prompt}}\nOutput:",
        "top_k" : 5,
        "top_p" : 0.9,
        "model_inference" : "llama",
        "n_batch" : 10,
        "template_name" : "PHI",
        "is_ready": true,
        "is_internal": false
   },
   {
        "id": "yi-6b",
        "model_title": "Yi 6B Chat",
        "model_file": "yi-6b-chat-Q8_0.gguf",
        "model_url": "https://huggingface.co/flyingfishinwater/goodmodels/resolve/main/yi-6b-chat-Q8_0.gguf?download=true",
        "model_info_url": "https://huggingface.co/01-ai/Yi-6B-Chat",
        "model_avatar": "logo_yi",
        "model_description": "The Yi series models are the next generation of open-source large language models trained from scratch by 01.AI. Targeted as a bilingual language model and trained on 3T multilingual corpus, the Yi series models become one of the strongest LLM worldwide, showing promise in language understanding, commonsense reasoning, reading comprehension, and more. For example, For English language capability, the Yi series models ranked 2nd (just behind GPT-4), outperforming other LLMs (such as LLaMA2-chat-70B, Claude 2, and ChatGPT) on the AlpacaEval Leaderboard in Dec 2023. For Chinese language capability, the Yi series models landed in 2nd place (following GPT-4), surpassing other LLMs (such as Baidu ERNIE, Qwen, and Baichuan) on the SuperCLUE in Oct 2023.",
        "developer": "01.AI",
        "developer_url": "https://01.ai/",
        "file_size": 6440,
        "context" : 200000,
        "temp" : 0.6,
        "prompt_format" : "<|im_start|>user\n<|im_end|>\n{{prompt}}\n<|im_start|>assistant\n",
        "top_k" : 5,
        "top_p" : 0.9,
        "model_inference" : "llama",
        "n_batch" : 10,
        "template_name" : "yi",
        "is_ready": true,
        "is_internal": false
   }
]
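
Each entry in this manifest pairs a downloadable GGUF file with the sampling parameters and prompt template a client should use, with `{{prompt}}` as the substitution point for the user's message. Below is a minimal sketch of how a client might load and use the manifest; the file name `downloadable_models.json` and the helper names `load_models` / `render_prompt` are illustrative assumptions, not part of any published API.

```python
import json

# Hypothetical loader sketch: the field names ("id", "model_url",
# "prompt_format", ...) come from the manifest above; everything else
# here is an assumption for illustration.
REQUIRED_KEYS = {
    "id", "model_title", "model_file", "model_url",
    "prompt_format", "context", "temp", "top_k", "top_p",
}

def load_models(path: str) -> list[dict]:
    """Parse the manifest and check each entry for the core fields."""
    with open(path, encoding="utf-8") as f:
        models = json.load(f)
    for entry in models:
        missing = REQUIRED_KEYS - entry.keys()
        if missing:
            raise ValueError(f"{entry.get('id', '?')} is missing {sorted(missing)}")
    return models

def render_prompt(entry: dict, user_message: str) -> str:
    """Substitute the user's message into the model's prompt template."""
    return entry["prompt_format"].replace("{{prompt}}", user_message)

if __name__ == "__main__":
    models = load_models("downloadable_models.json")  # assumed file name
    phi = next(m for m in models if m["id"] == "phi-2")
    print(render_prompt(phi, "What is the capital of France?"))
    # Prints: Instruct: What is the capital of France?
    #         Output:
```

Treating the prompt template as data, as this manifest does, lets one client binary support several chat formats (HumanBot, TinyLlama/Zephyr, Mistral `[INST]`, Phi `Instruct:`, ChatML) without per-model code changes.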