Kevin Fink committed · Commit ee90a8f · 1 Parent(s): c17b108
deve
app.py CHANGED
@@ -246,19 +246,19 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
 # Define Gradio interface
 @spaces.GPU
 def predict(text):
+
     config = AutoConfig.from_pretrained("shorecode/t5-efficient-tiny-nh8-summarizer")
     model = AutoModelForSeq2SeqLM.from_config(config)
     #initialize_weights(model)
     tokenizer = AutoTokenizer.from_pretrained('shorecode/t5-efficient-tiny-nh8-summarizer')
-    inputs = tokenizer(text,
+    inputs = tokenizer(text, padding='max_length', max_length=512, truncation=True)
     outputs = model(inputs)
     predictions = outputs.logits.argmax(dim=-1)
     return predictions.item()


 @spaces.GPU(duration=120)
-
-def run_train(text):
+def run_train(dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
     def initialize_weights(model):
         for name, param in model.named_parameters():
             if 'encoder.block.0.layer.0.DenseReluDense.wi.weight' in name: # Example layer
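
For context, a minimal sketch of what predict() appears to be aiming for with this checkpoint. This is an assumption-laden illustration, not the Space's actual code: it loads pretrained weights with from_pretrained() (the diff builds the model with from_config(), which leaves the weights randomly initialized), asks the tokenizer for PyTorch tensors, and returns a decoded generated summary rather than calling .item() on the argmax of the logits.

import spaces
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

CHECKPOINT = "shorecode/t5-efficient-tiny-nh8-summarizer"

@spaces.GPU
def predict(text):
    # Assumption: load pretrained weights; from_config() alone yields an untrained model.
    tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
    model = AutoModelForSeq2SeqLM.from_pretrained(CHECKPOINT)
    # return_tensors="pt" is needed before the encoding can be passed to the model.
    inputs = tokenizer(
        text,
        return_tensors="pt",
        padding="max_length",
        max_length=512,
        truncation=True,
    )
    # A seq2seq summarizer produces text via generate(), not a single class index.
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)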
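
The commit also widens run_train() to take the training hyperparameters directly. A hypothetical way to wire that signature into the Gradio interface referenced by the "# Define Gradio interface" comment is sketched below; the component labels and default values are illustrative and not taken from app.py.

import gradio as gr

# Hypothetical interface; run_train is the @spaces.GPU(duration=120) function above.
demo = gr.Interface(
    fn=run_train,
    inputs=[
        gr.Textbox(label="Dataset name"),
        gr.Textbox(label="Hub ID"),
        gr.Textbox(label="API key", type="password"),
        gr.Number(label="Epochs", value=3, precision=0),
        gr.Number(label="Batch size", value=8, precision=0),
        gr.Number(label="Learning rate", value=3e-4),
        gr.Number(label="Gradient accumulation steps", value=1, precision=0),
    ],
    outputs=gr.Textbox(label="Status"),
)

if __name__ == "__main__":
    demo.launch()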