Upload fine_tunnig_yuuka.py.txt
fine_tunnig_yuuka.py.txt
ADDED
!pip install transformers accelerate peft torch datasets

from datasets import load_dataset, Dataset

dataset = load_dataset("Mahler60/yuuka_lore")  # Change this to the name of your dataset
# (Note: this Hub dataset is loaded but never used below; the script reads a local JSONL file instead.)

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
)
from peft import get_peft_model, LoraConfig, TaskType
import json
# 1. Load the base model and tokenizer
model_name = "EleutherAI/gpt-neox-20b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # GPT-NeoX ships without a pad token; reuse EOS so padding works
model = AutoModelForCausalLM.from_pretrained(model_name)
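# Heads-up: gpt-neox-20b needs roughly 40 GB just for fp16 weights, and far more to
# train. While testing the pipeline, a smaller checkpoint is a reasonable stand-in
# (hypothetical substitution, not part of the original script):
# model_name = "EleutherAI/pythia-410m"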
# 2. Configure LoRA (parameter-efficient fine-tuning)
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,  # Causal language model
    r=8,                           # Rank of the low-rank update matrices
    lora_alpha=16,                 # Scaling factor
    lora_dropout=0.1               # Dropout to reduce overfitting
)
model = get_peft_model(model, peft_config)
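# Optional sanity check (not in the original script): PEFT models expose
# print_trainable_parameters(), which reports how few weights LoRA actually updates
# compared to the frozen 20B base model.
model.print_trainable_parameters()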
# 3. Load the data from the JSON Lines file (one JSON object per line)
data = []
with open("Yuuka-Proto.JSON", "r") as file:
    for line in file:
        example = json.loads(line.strip())
        prompt = example["prompt"]
        response = example["response"]
        combined = f"{prompt} {response}"  # Concatenate prompt + response into one training sequence
        data.append(combined)
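# For reference, each line of Yuuka-Proto.JSON is expected to look like this
# (illustrative shape only, not taken from the actual file):
# {"prompt": "Who is Yuuka?", "response": "Yuuka is ..."}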
# 4. Tokenize the dataset
# Trainer expects a dataset of individual examples, not one pre-batched tensor dict,
# so wrap the strings in a datasets.Dataset and tokenize with .map()
train_data = Dataset.from_dict({"text": data})

def tokenize_fn(batch):
    # max_length=512 is an assumed cap; adjust it to the length of your examples
    return tokenizer(batch["text"], truncation=True, max_length=512)

tokenized_data = train_data.map(tokenize_fn, batched=True, remove_columns=["text"])
# 5. Configure training arguments
training_args = TrainingArguments(
    output_dir="./results",         # Folder for checkpoints and results
    per_device_train_batch_size=1,  # Batch size per device
    num_train_epochs=1,             # Number of epochs
    logging_dir="./logs",           # Folder for logs
    save_steps=10,                  # Save a checkpoint every X steps
)
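# With a per-device batch size of 1, gradient accumulation is the usual way to get a
# larger effective batch. TrainingArguments supports this via gradient_accumulation_steps;
# the value 8 below is an illustrative choice, not from the original script:
# training_args = TrainingArguments(..., gradient_accumulation_steps=8)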
# 6. Configure the Trainer
# The data collator pads each batch and copies input_ids into labels, which the
# model needs to compute the causal LM loss (the original passed no labels at all)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_data,  # Tokenized dataset
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
# 7. Train the model
trainer.train()
# 8. Save the fine-tuned model
# (save_pretrained on a PEFT model stores only the small LoRA adapter weights,
# not a full copy of the 20B base model)
model.save_pretrained("./ajustado")
tokenizer.save_pretrained("./ajustado")
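# A minimal inference sketch (assumptions: the same base checkpoint is available
# and the adapter was saved to ./ajustado as above):
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(model_name)
tuned = PeftModel.from_pretrained(base, "./ajustado")
tuned.eval()

inputs = tokenizer("Yuuka:", return_tensors="pt")
outputs = tuned.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))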