morriszms commited on
Commit
d0dbcfb
·
verified ·
1 Parent(s): f477429

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ MARS-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ MARS-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ MARS-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ MARS-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ MARS-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ MARS-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ MARS-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ MARS-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ MARS-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ MARS-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ MARS-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ MARS-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
MARS-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bf7429c8f239b1556a67b14fb34fa08380baa1944aac37549ffb31fd7d72927
3
+ size 3179136544
MARS-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d27d4b1ad0602c96b66e9af7ad9cea706e80ef1998e526f816e89f2fb87babf
3
+ size 4321961920
MARS-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:483c315565eef5f0a9d464cb396f34126edf6593ca1c01dc16505dcd96f0f657
3
+ size 4018923456
MARS-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ed6a7c47ca4d2ccab6f2e676becab4e2a28fb94e042a22b955fd7130fadc752
3
+ size 3664504768
MARS-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6cdb175531ae1d7f98985dbaa3e2496ed24e96eb5d819c0640ebcc56b09afd56
3
+ size 4661217760
MARS-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20db657f72a03e5c915cbf23860787e8a1f8f2ba1f9ce72b80e811ba37c4c5da
3
+ size 4920740320
MARS-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c8a54fb3969bb38b1e3ab0e529e36a7630a25176eb98dee7a4a34e725c44564
3
+ size 4692675040
MARS-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38754dbe8cac6b0a7db038873d9b2b92c6d453df972b9816294402b7f9f9c6dd
3
+ size 5599300576
MARS-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0871943efac76f1d575af19ad9bbc485a45033c8a6f14af0fc6a5136dff19bc
3
+ size 5732994016
MARS-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73a0f061ec504a2387c50f6b505a8d9bf8f114f0271f24baf533106d6456a6f1
3
+ size 5599300576
MARS-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9b3e5931ed7bbaabf8a2c4d51de5155026ef3cf562d51cdc258127c7732b75b
3
+ size 6596013568
MARS-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6aa7ffa8fc39c8dcadec9422e1d9437a289d07b6dcc9846d36b581d39737ec28
3
+ size 8540779968
README.md ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: llama3
3
+ language:
4
+ - tr
5
+ - en
6
+ base_model: curiositytech/MARS
7
+ pipeline_tag: text-generation
8
+ tags:
9
+ - TensorBlock
10
+ - GGUF
11
+ model-index:
12
+ - name: MARS
13
+ results:
14
+ - task:
15
+ type: text-generation
16
+ name: Text Generation
17
+ dataset:
18
+ name: AI2 Reasoning Challenge TR v0.2
19
+ type: ai2_arc
20
+ config: ARC-Challenge
21
+ split: test
22
+ args:
23
+ num_few_shot: 25
24
+ metrics:
25
+ - type: acc
26
+ value: 46.08
27
+ name: accuracy
28
+ - task:
29
+ type: text-generation
30
+ name: Text Generation
31
+ dataset:
32
+ name: MMLU TR v0.2
33
+ type: cais/mmlu
34
+ config: all
35
+ split: test
36
+ args:
37
+ num_few_shot: 5
38
+ metrics:
39
+ - type: acc
40
+ value: 47.02
41
+ name: accuracy
42
+ - task:
43
+ type: text-generation
44
+ name: Text Generation
45
+ dataset:
46
+ name: TruthfulQA TR v0.2
47
+ type: truthful_qa
48
+ config: multiple_choice
49
+ split: validation
50
+ args:
51
+ num_few_shot: 0
52
+ metrics:
53
+ - type: acc
54
+ value: 49.38
55
+ name: accuracy
56
+ - task:
57
+ type: text-generation
58
+ name: Text Generation
59
+ dataset:
60
+ name: Winogrande TR v0.2
61
+ type: winogrande
62
+ config: winogrande_xl
63
+ split: validation
64
+ args:
65
+ num_few_shot: 5
66
+ metrics:
67
+ - type: acc
68
+ value: 53.71
69
+ name: accuracy
70
+ - task:
71
+ type: text-generation
72
+ name: Text Generation
73
+ dataset:
74
+ name: GSM8k TR v0.2
75
+ type: gsm8k
76
+ config: main
77
+ split: test
78
+ args:
79
+ num_few_shot: 5
80
+ metrics:
81
+ - type: acc
82
+ value: 53.08
83
+ name: accuracy
84
+ ---
85
+
86
+ <div style="width: auto; margin-left: auto; margin-right: auto">
87
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
88
+ </div>
89
+ <div style="display: flex; justify-content: space-between; width: 100%;">
90
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
91
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
92
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
93
+ </p>
94
+ </div>
95
+ </div>
96
+
97
+ ## curiositytech/MARS - GGUF
98
+
99
+ This repo contains GGUF format model files for [curiositytech/MARS](https://huggingface.co/curiositytech/MARS).
100
+
101
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
102
+
103
+ <div style="text-align: left; margin: 20px 0;">
104
+ <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
105
+ Run them on the TensorBlock client using your local machine ↗
106
+ </a>
107
+ </div>
108
+
109
+ ## Prompt template
110
+
111
+ ```
112
+ <|begin_of_text|><|start_header_id|>system<|end_header_id|>
113
+
114
+ {system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>
115
+
116
+ {prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
117
+ ```
118
+
119
+ ## Model file specification
120
+
121
+ | Filename | Quant type | File Size | Description |
122
+ | -------- | ---------- | --------- | ----------- |
123
+ | [MARS-Q2_K.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q2_K.gguf) | Q2_K | 2.961 GB | smallest, significant quality loss - not recommended for most purposes |
124
+ | [MARS-Q3_K_S.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q3_K_S.gguf) | Q3_K_S | 3.413 GB | very small, high quality loss |
125
+ | [MARS-Q3_K_M.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q3_K_M.gguf) | Q3_K_M | 3.743 GB | very small, high quality loss |
126
+ | [MARS-Q3_K_L.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q3_K_L.gguf) | Q3_K_L | 4.025 GB | small, substantial quality loss |
127
+ | [MARS-Q4_0.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q4_0.gguf) | Q4_0 | 4.341 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
128
+ | [MARS-Q4_K_S.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q4_K_S.gguf) | Q4_K_S | 4.370 GB | small, greater quality loss |
129
+ | [MARS-Q4_K_M.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q4_K_M.gguf) | Q4_K_M | 4.583 GB | medium, balanced quality - recommended |
130
+ | [MARS-Q5_0.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q5_0.gguf) | Q5_0 | 5.215 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
131
+ | [MARS-Q5_K_S.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q5_K_S.gguf) | Q5_K_S | 5.215 GB | large, low quality loss - recommended |
132
+ | [MARS-Q5_K_M.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q5_K_M.gguf) | Q5_K_M | 5.339 GB | large, very low quality loss - recommended |
133
+ | [MARS-Q6_K.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q6_K.gguf) | Q6_K | 6.143 GB | very large, extremely low quality loss |
134
+ | [MARS-Q8_0.gguf](https://huggingface.co/tensorblock/MARS-GGUF/blob/main/MARS-Q8_0.gguf) | Q8_0 | 7.954 GB | very large, extremely low quality loss - not recommended |
135
+
136
+
137
+ ## Downloading instruction
138
+
139
+ ### Command line
140
+
141
+ Firstly, install Huggingface Client
142
+
143
+ ```shell
144
+ pip install -U "huggingface_hub[cli]"
145
+ ```
146
+
147
+ Then, download the individual model file to a local directory
148
+
149
+ ```shell
150
+ huggingface-cli download tensorblock/MARS-GGUF --include "MARS-Q2_K.gguf" --local-dir MY_LOCAL_DIR
151
+ ```
152
+
153
+ If you want to download multiple model files with a pattern (e.g., `*Q4_K*gguf`), you can try:
154
+
155
+ ```shell
156
+ huggingface-cli download tensorblock/MARS-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
157
+ ```