---
license: apache-2.0
datasets:
  - christykoh/imdb_pt
language:
  - pt
metrics:
  - accuracy
library_name: transformers
pipeline_tag: text-classification
tags:
  - sentiment-analysis
---

## bert-base-portuguese-cased

| Epoch | Training Loss | Validation Loss | Accuracy |
|-------|---------------|-----------------|----------|
| 1     | 0.304600      | 0.224774        | 0.908200 |
| 2     | 0.138800      | 0.222201        | 0.918200 |
| 3     | 0.080800      | 0.316631        | 0.922200 |

## gpt2-portuguese-small

| Epoch | Training Loss | Validation Loss | Accuracy |
|-------|---------------|-----------------|----------|
| 1     | 0.341800      | 0.241748        | 0.897600 |
| 2     | 0.202500      | 0.224077        | 0.911600 |
| 3     | 0.149300      | 0.239030        | 0.916000 |

```python
# IMDB
! pip install transformers datasets evaluate accelerate -q

import evaluate
import numpy as np

from huggingface_hub import login
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorWithPadding
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer

# Basic fine-tuning arguments
token = "your_token"
task = "christykoh/imdb_pt"
model_name = "neuralmind/bert-base-portuguese-cased"
output_dir = "checkpoint"
learning_rate = 4e-5
per_device_train_batch_size = 32
per_device_eval_batch_size = 32
num_train_epochs = 3
weight_decay = 0.01
evaluation_strategy = "epoch"
save_strategy = "epoch"
hub_model_id = "nicholasKluge/Teeny-tiny-llama-162m-imdb"

# Log in to the Hub to load and push
login(token=token)

# Load the task dataset
dataset = load_dataset(task)

# Create a `ModelForSequenceClassification`
model = AutoModelForSequenceClassification.from_pretrained(
    model_name,
    num_labels=2,
    id2label={0: "NEGATIVE", 1: "POSITIVE"},
    label2id={"NEGATIVE": 0, "POSITIVE": 1},
)

tokenizer = AutoTokenizer.from_pretrained(model_name)

# If the model does not have a pad token (e.g., GPT-2), reuse the EOS token
# tokenizer.pad_token = tokenizer.eos_token
# model.config.pad_token_id = model.config.eos_token_id

# Preprocess the dataset: tokenize the review text, truncating to 256 tokens
def preprocess_function(examples):
    return tokenizer(examples["text"], truncation=True, max_length=256)

dataset_tokenized = dataset.map(preprocess_function, batched=True)

# Create a simple data collator that pads each batch to its longest sequence
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

# Use accuracy as the evaluation metric
accuracy = evaluate.load("accuracy")

# Function to compute accuracy from the raw logits
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return accuracy.compute(predictions=predictions, references=labels)

# Define training arguments
training_args = TrainingArguments(
    output_dir=output_dir,
    learning_rate=learning_rate,
    per_device_train_batch_size=per_device_train_batch_size,
    per_device_eval_batch_size=per_device_eval_batch_size,
    num_train_epochs=num_train_epochs,
    weight_decay=weight_decay,
    evaluation_strategy=evaluation_strategy,
    save_strategy=save_strategy,
    load_best_model_at_end=True,
    push_to_hub=False,
    hub_token=token,
    hub_private_repo=True,
    hub_model_id=hub_model_id,
    tf32=False,
)

# Define the Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset_tokenized["train"],
    eval_dataset=dataset_tokenized["test"],
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)

# Train!
trainer.train()
```
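After training, the fine-tuned classifier can be served with the `pipeline` API. Below is a minimal inference sketch; it assumes the checkpoint was pushed to the Hub under the `hub_model_id` above (the script sets `push_to_hub=False`, so either call `trainer.push_to_hub()` first or point `model=` at the local `output_dir` checkpoint instead).

```python
from transformers import pipeline

# Load the fine-tuned classifier. The repo id below assumes the checkpoint
# was pushed under the hub_model_id used in the training script; a local
# checkpoint directory (e.g., "checkpoint") works here as well.
classifier = pipeline(
    "text-classification",
    model="nicholasKluge/Teeny-tiny-llama-162m-imdb",
)

# Portuguese review: "This movie is great!"
print(classifier("Este filme é ótimo!"))
# Expected output shape: [{'label': 'POSITIVE', 'score': ...}]
```

The `id2label` mapping set during fine-tuning is what makes the pipeline return the readable `NEGATIVE`/`POSITIVE` labels instead of the default `LABEL_0`/`LABEL_1`.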