jarodrigues committed
Commit 5ae56f3
Parent: 7c17f88

Update README.md

Files changed (1): README.md (+3 -1)
README.md CHANGED
@@ -140,7 +140,8 @@ The model can be used by fine-tuning it for a specific task:
 >>> dataset = load_dataset("PORTULAN/glueptpt", "rte")
 
 >>> def tokenize_function(examples):
-... return tokenizer(examples["text"], padding="max_length", truncation=True)
+... return tokenizer(examples["sentence1"], examples["sentence2"], padding="max_length", truncation=True)
+
 >>> tokenized_datasets = dataset.map(tokenize_function, batched=True)
 
 >>> training_args = TrainingArguments(output_dir="albertina-pt-pt-rte", evaluation_strategy="epoch")
@@ -150,6 +151,7 @@ The model can be used by fine-tuning it for a specific task:
 ... train_dataset=tokenized_datasets["train"],
 ... eval_dataset=tokenized_datasets["validation"],
 ... )
+
 >>> trainer.train()
 
 ```
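For context, the hunk above switches the tokenization from a single "text" field to the sentence pair that the RTE task actually provides. Below is a minimal sketch of the full fine-tuning snippet that this change patches, with the pair tokenization applied. The checkpoint name `PORTULAN/albertina-ptpt` and `num_labels=2` are assumptions for illustration; neither appears in this diff.

```python
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

# Assumption: the README loads a checkpoint earlier; it is not shown in this hunk.
checkpoint = "PORTULAN/albertina-ptpt"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# RTE is a binary entailment task, hence num_labels=2 (also an assumption).
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

dataset = load_dataset("PORTULAN/glueptpt", "rte")

def tokenize_function(examples):
    # Tokenize the premise/hypothesis pair, as in the updated README line.
    return tokenizer(
        examples["sentence1"],
        examples["sentence2"],
        padding="max_length",
        truncation=True,
    )

tokenized_datasets = dataset.map(tokenize_function, batched=True)

training_args = TrainingArguments(
    output_dir="albertina-pt-pt-rte",
    evaluation_strategy="epoch",
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
)

trainer.train()
```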