Kevin Fink committed on
Commit
379c443
·
1 Parent(s): d202762
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -7,14 +7,14 @@ import traceback
7
 
8
 
9
  import os
10
- from huggingface_hub import login
11
 
12
  @spaces.GPU
13
  def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
14
  try:
15
  #login(api_key.strip())
16
  # Load the model and tokenizer
17
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name.strip(), num_labels=2)
18
 
19
 
20
  # Set training arguments
@@ -45,7 +45,7 @@ def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch
45
  max_length = 128
46
  # Load the dataset
47
  dataset = load_dataset(dataset_name.strip())
48
- tokenizer = AutoTokenizer.from_pretrained(model_name)
49
  # Tokenize the dataset
50
  def tokenize_function(examples):
51
 
 
7
 
8
 
9
  import os
10
+
11
 
12
  @spaces.GPU
13
  def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
14
  try:
15
  #login(api_key.strip())
16
  # Load the model and tokenizer
17
+ model = AutoModelForSeq2SeqLM.from_pretrained('google/t5-efficient-tiny-nh8', num_labels=2)
18
 
19
 
20
  # Set training arguments
 
45
  max_length = 128
46
  # Load the dataset
47
  dataset = load_dataset(dataset_name.strip())
48
+ tokenizer = AutoTokenizer.from_pretrained('google/t5-efficient-tiny-nh8')
49
  # Tokenize the dataset
50
  def tokenize_function(examples):
51