Update README.md
README.md CHANGED
@@ -164,17 +164,48 @@ The progression demonstrates a shift from factual summarization to vivid, immersive
 
 ### Quick Start
 ```python
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
+import torch
 
-
+# Load the merged model and tokenizer
+model_name = "deepseek-ai/deepseek-uncensored-lore"  # Replace with your actual model name
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
 
-
-
-
+# Define the test prompt
+prompt = """Description: Jake Sully, portrayed by Sam Worthington, is a former Marine who becomes part of the Avatar Program in James Cameron's "Avatar."
+He is sent to the moon Pandora, where he inhabits an avatar body to interact with the native Na'vi people.
+Jake falls in love with the Na'vi culture and Neytiri, and ultimately leads a fight to protect Pandora from human exploitation.
+Scenario: Jake Sully is planning a mission to protect Pandora from an impending human attack.
+He needs to coordinate with the Na'vi and his human allies to devise a strategy that will safeguard their home.
+Story Arc:"""
+
+# Configure generation settings
+generation_config = GenerationConfig(
+    temperature=0.7,
+    top_p=0.95,
+    top_k=50,
+    do_sample=True,
+    no_repeat_ngram_size=4,
+    repetition_penalty=1.2,
+)
+
+# Tokenize the input
+inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to("cuda")
+
+# Generate text with the model
+outputs = model.generate(
+    **inputs,
+    generation_config=generation_config,
+    max_new_tokens=150,
+    eos_token_id=tokenizer.eos_token_id
+)
+
+# Decode and print the generated text
+generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+print("Generated Story Arc:\n")
+print(generated_text)
 
-print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 ```
 
 ---
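One caveat for readers copying the added snippet: it moves the tokenized inputs to "cuda" unconditionally, while `device_map="auto"` may dispatch the model to CPU on machines without a GPU. Below is a minimal device-agnostic sketch of the same Quick Start flow, not part of the committed README; it reuses the placeholder `model_name` from the diff and queries `model.device` instead of hard-coding the device.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Placeholder model id taken from the diff above; replace with your actual model name.
model_name = "deepseek-ai/deepseek-uncensored-lore"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    # fp16 only makes sense on GPU; fall back to fp32 on CPU-only machines.
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)

# Same Description/Scenario/Story Arc prompt format as the README example.
prompt = "Description: ...\nScenario: ...\nStory Arc:"

# model.device reflects where the weights were actually placed,
# so this works on both CUDA and CPU-only hosts.
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

outputs = model.generate(**inputs, do_sample=True, temperature=0.7, max_new_tokens=150)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```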
|