Kevin Fink committed
Commit b058713 · 1 Parent(s): 1554413
Files changed (1)
  1. app.py +7 -6
app.py CHANGED
@@ -1,7 +1,7 @@
 import spaces
 import gradio as gr
-from transformers import Trainer, TrainingArguments, AutoTokenizer, TFAutoModelForSeq2SeqLM
-from transformers import DataCollatorForSeq2Seq
+from transformers import Trainer, TrainingArguments, AutoTokenizer, AutoModelForSeq2SeqLM
+from transformers import DataCollatorForSeq2Seq, AutoConfig
 from datasets import load_dataset, concatenate_datasets, load_from_disk
 import traceback
 from sklearn.metrics import accuracy_score
@@ -12,19 +12,19 @@ from huggingface_hub import login
 from peft import get_peft_model, LoraConfig
 
 os.environ['HF_HOME'] = '/data/.huggingface'
-
+'''
 lora_config = LoraConfig(
     r=16, # Rank of the low-rank adaptation
     lora_alpha=32, # Scaling factor
     lora_dropout=0.1, # Dropout for LoRA layers
     bias="none" # Bias handling
 )
-model = TFAutoModelForSeq2SeqLM.from_pretrained('google/t5-efficient-tiny', num_labels=2, force_download=True)
+model = AutoModelForSeq2SeqLM.from_pretrained('google/t5-efficient-tiny', num_labels=2, force_download=True)
 model = get_peft_model(model, lora_config)
 model.gradient_checkpointing_enable()
 model_save_path = '/data/lora_finetuned_model' # Specify your desired save path
 model.save_pretrained(model_save_path)
-
+'''
 
 def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
     try:
@@ -145,7 +145,8 @@ def predict(text):
 
 @spaces.GPU(duration=120)
 def run_train(dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
-    model = TFAutoModelForSeq2SeqLM.from_pretrained('/data/lora_finetuned_model', num_labels=2)
+    config = AutoConfig.from_pretrained("google-t5/google/t5-efficient-tiny")
+    model = AutoModelForSeq2SeqLM.from_config(config, num_labels=2)
     result = fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad)
     return result
 # Create Gradio interface
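
For reference, a minimal sketch of the loading path this commit switches to inside run_train: build the T5 model from its configuration (freshly initialised weights, no download) and attach the LoRA adapter before fine-tuning. This is an illustration under assumptions, not the code in the repo: it uses the canonical Hub id 'google/t5-efficient-tiny' (the committed line references "google-t5/google/t5-efficient-tiny", which may not resolve), and it passes num_labels through AutoConfig rather than from_config, since from_config forwards unknown keyword arguments to the model constructor. The LoRA settings are copied from the block the commit comments out.

from transformers import AutoConfig, AutoModelForSeq2SeqLM
from peft import get_peft_model, LoraConfig

BASE_MODEL = "google/t5-efficient-tiny"  # assumed canonical Hub id

# Build the model from config only: weights are randomly initialised,
# not downloaded. Use from_pretrained instead to start from the checkpoint.
config = AutoConfig.from_pretrained(BASE_MODEL, num_labels=2)
model = AutoModelForSeq2SeqLM.from_config(config)

# Same LoRA hyperparameters as the commented-out module-level block.
lora_config = LoraConfig(
    r=16,             # rank of the low-rank update matrices
    lora_alpha=32,    # scaling factor applied to the LoRA update
    lora_dropout=0.1, # dropout on the LoRA layers
    bias="none",      # leave bias parameters untrained
)
model = get_peft_model(model, lora_config)
model.gradient_checkpointing_enable()

Building from config keeps the download out of the GPU-timed function, at the cost of starting from untrained weights; from_pretrained would keep the pretrained checkpoint.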
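The line removed from run_train tried to reload '/data/lora_finetuned_model' with an auto model class; a directory written by save_pretrained on a PEFT-wrapped model contains only the adapter weights, so the usual pattern is to rebuild the base model and attach the saved adapter with PeftModel.from_pretrained. A sketch, assuming the save path from the commented-out block:

from transformers import AutoModelForSeq2SeqLM
from peft import PeftModel

ADAPTER_DIR = "/data/lora_finetuned_model"  # path used by the commented-out block

# Rebuild the base model, then load the saved LoRA adapter on top of it.
base = AutoModelForSeq2SeqLM.from_pretrained("google/t5-efficient-tiny")
model = PeftModel.from_pretrained(base, ADAPTER_DIR, is_trainable=True)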