Commit: Update README.md

File changed: README.md
@@ -52,8 +52,8 @@ This model can be easily loaded using the AutoModelForCausalLM functionality:

Before (removed, lines 52–58):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("cerebras/Cerebras-GPT-6.7B")
model = AutoModelForCausalLM.from_pretrained("cerebras/Cerebras-GPT-6.7B")
model = PeftModel.from_pretrained(model, "bjoernp/alpaca-cerebras-6.7B")
text = "Generative AI is "
```
After (added, lines 52–58):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("cerebras/Cerebras-GPT-6.7B")
model = AutoModelForCausalLM.from_pretrained("cerebras/Cerebras-GPT-6.7B", torch_dtype=torch.float16, device_map='auto', load_in_8bit=True)
model = PeftModel.from_pretrained(model, "bjoernp/alpaca-cerebras-6.7B", torch_dtype=torch.float16, device_map='auto')
text = "Generative AI is "
```