# GenAI_project/scripts/train_lora.py
import wandb
import yaml
from transformers import Trainer, TrainingArguments
from utils.monitor import measure_resources
from data.datasets import load_and_tokenize_data
from models.lora_model import get_lora_model
# Load the configuration
with open('config/config.yaml', 'r') as f:
    config = yaml.safe_load(f)
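# The keys read in this script imply a config/config.yaml shaped roughly like the
# sketch below (illustrative only; field values and any additional fields are
# project-specific assumptions, not taken from the repository):
#
#   wandb:
#     project: my-genai-project   # placeholder
#     entity: my-wandb-entity     # placeholder
#   training:
#     num_epochs: 3               # example value
#     batch_size: 8               # example value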
# Initialize the wandb run
wandb.init(project=config['wandb']['project'], entity=config['wandb']['entity'])
# Load and tokenize the data
train_dataset, test_dataset = load_and_tokenize_data(config)
# Load the LoRA model
model = get_lora_model(config)
# Define the training arguments
training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=config['training']['num_epochs'],
    per_device_train_batch_size=config['training']['batch_size'],
    per_device_eval_batch_size=config['training']['batch_size'],
    evaluation_strategy='epoch',
    save_steps=10_000,
    save_total_limit=2,
    logging_dir='./logs',
    logging_steps=10,
)
# Create the Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,
)
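# Note: measure_resources is a project-local helper (utils/monitor). Based on its
# usage here and the comment below, it presumably wraps trainer.train() while
# recording resource usage (e.g. time/memory) under the given "LoRA" label; the
# exact metrics it collects are not shown in this file.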
# Measure resources and train the model
measure_resources(trainer, "LoRA")