Kevin Fink committed on
Commit 50f7a65 · 1 Parent(s): 8bfc42f
Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -91,18 +91,18 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
         # Assuming 'text' is the input and 'target' is the expected output
         model_inputs = tokenizer(
             examples['text'],
-            max_length=max_length, # Set to None for dynamic padding
-            truncation=True,
-            padding='max_length',
+            #max_length=max_length, # Set to None for dynamic padding
+            #truncation=True,
+            #padding='max_length',
             return_tensors='pt',
         )
 
         # Setup the decoder input IDs (shifted right)
         labels = tokenizer(
             examples['target'],
-            max_length=max_length, # Set to None for dynamic padding
-            truncation=True,
-            padding='max_length',
+            #max_length=max_length, # Set to None for dynamic padding
+            #truncation=True,
+            #padding='max_length',
             #text_target=examples['target'],
             return_tensors='pt',
         )
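For context, when max_length/truncation/padding are dropped from the tokenizer call as in this change, padding is usually deferred to a data collator at batch time rather than done per example. The sketch below shows one common way to wire that up with DataCollatorForSeq2Seq; the checkpoint name and collator settings are assumptions for illustration, not part of this commit.

# Minimal sketch of batch-time (dynamic) padding, assuming a seq2seq checkpoint.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq

checkpoint = "google/flan-t5-small"  # hypothetical checkpoint, not from this commit
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

def tokenize_function(examples):
    # No padding/truncation here; sequences keep their natural lengths.
    model_inputs = tokenizer(examples["text"])
    labels = tokenizer(text_target=examples["target"])
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

# Pads input_ids and labels to the longest sequence in each batch;
# label padding uses -100 so padded positions are ignored by the loss.
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, padding="longest")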