Update README.md
README.md CHANGED
@@ -14,23 +14,6 @@ model_name: TinyLlama-1.1B-Chat v1.0
 model_type: Pythia
 quantized_by: jartine
 ---
-<div align="center">
-
-<!-- header start -->
-<!-- 200823 -->
-<div style="width: auto; margin-left: auto; margin-right: auto">
-</div>
-<div style="display: flex; justify-content: space-between; width: 100%;">
-<div style="display: flex; flex-direction: column; align-items: flex-start;">
-<p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/FwAVVu7eJ4">Chat & support: jartine's Discord server</a></p>
-</div>
-<div style="display: flex; flex-direction: column; align-items: flex-end;">
-</div>
-</div>
-<div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">jartine's LLM work is generously supported by a grant from <a href="https://mozilla.org">mozilla</a></p></div>
-<hr style="margin-top: 1.0em; margin-bottom: 1.0em;">
-<!-- header end -->
-</div>
 
 # TinyLlama-1.1B-Chat v1.0 w/ GGUF + llamafile
 