Update README.md
README.md (changed)
tags:
- gpt
- llm
- large language model
- PAIX.Cloud
inference: true
thumbnail: https://static.wixstatic.com/media/bdee4e_8aa5cefc86024bc88f7e20e3e19d9ff3~mv2.png/v1/fill/w_192%2Ch_192%2Clg_1%2Cusm_0.66_1.00_0.01/bdee4e_8aa5cefc86024bc88f7e20e3e19d9ff3~mv2.png
---
# Model Card

## Summary
This model, Astrid-3B, is a StableLMEpochModel for causal language modeling, designed to generate human-like text.
It is part of our mission to make AI technology accessible to everyone, focusing on personalization, data privacy, and transparent AI governance.
Trained on English text, it is a versatile tool for a variety of applications.
It is one of the many models available on our platform; open-source 1B and 7B variants are also available.

This model was trained by [PAIX.Cloud](https://www.paix.cloud/).
- Wait list: [join here](https://www.paix.cloud/join-waitlist)

## Usage

[...]

Also make sure you are providing your Hugging Face token to the pipeline if the model is in a private repo.
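A minimal sketch of passing that token, assuming a recent `transformers` release where `pipeline` accepts `token=` (older releases used `use_auth_token=`); the `"hf_..."` value is a placeholder, not a real credential:

```python
from transformers import pipeline

# Hypothetical call: "hf_..." stands in for your own Hugging Face token.
generate_text = pipeline(
    model="PAIXAI/Astrid-3B",
    token="hf_...",
    trust_remote_code=True,
)
```

The card's own quickstart pipeline is: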
```python
from transformers import pipeline

generate_text = pipeline(
    model="PAIXAI/Astrid-3B",
    torch_dtype="auto",
    trust_remote_code=True,
    use_fast=True,
    device_map={"": "cuda:0"},  # same device mapping used when loading the model below
)
```
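As a quick check, a hedged sketch of calling the pipeline constructed above; the question and the generation settings are illustrative assumptions, not values documented for Astrid-3B:

```python
# Hedged example call; max_new_tokens is an assumed setting, not a card default.
res = generate_text(
    "Why is drinking water so healthy?",
    max_new_tokens=256,
)
# The text-generation pipeline returns a list of dicts with a "generated_text" key.
print(res[0]["generated_text"])
```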

[...]

```python
from h2oai_pipeline import H2OTextGenerationPipeline
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "PAIXAI/Astrid-3B",
    use_fast=True,
    padding_side="left",
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    "PAIXAI/Astrid-3B",
    torch_dtype="auto",
    device_map={"": "cuda:0"},
    trust_remote_code=True,
)
```
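With the tokenizer and model loaded, a minimal generation sketch; the prompt format comes from the card's example below, while the decoding settings are assumptions:

```python
# Hedged sketch: encode the card's example prompt and generate a reply.
# max_new_tokens is an assumed setting, not one documented for Astrid-3B.
inputs = tokenizer("<|prompt|>How are you?<|endoftext|><|answer|>", return_tensors="pt").to("cuda:0")
output = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```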

[...]

You may also construct the pipeline from the loaded model and tokenizer yourself:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "PAIXAI/Astrid-3B"  # either local folder or huggingface model name
# Important: The prompt needs to be in the same format the model was trained with.
# You can find an example prompt in the experiment logs.
prompt = "<|prompt|>How are you?<|endoftext|><|answer|>"
```
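Since the prompt format matters, here is a small hypothetical helper (not part of the card) that wraps any user question in the training format shown above:

```python
# Hypothetical helper: reproduce the exact prompt template the model was trained with.
def build_prompt(question: str) -> str:
    return f"<|prompt|>{question}<|endoftext|><|answer|>"

print(build_prompt("How are you?"))
# -> <|prompt|>How are you?<|endoftext|><|answer|>
```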