locorene1000 committed
app.py
CHANGED
@@ -1,6 +1,6 @@
 import os
 import torch
-from transformers import
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 import spaces
 
@@ -20,7 +20,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 # Specify a pad_token_id if it is not already defined and it is the same as eos_token_id
 if tokenizer.pad_token_id is None or tokenizer.pad_token_id == tokenizer.eos_token_id:
-    tokenizer.pad_token_id = tokenizer.eos_token_id + 1
+    tokenizer.pad_token_id = tokenizer.eos_token_id + 1
 
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
@@ -36,10 +36,10 @@ def mejorar_resolucion(input_text):
     attention_mask = inputs['attention_mask']
 
     outputs = model.generate(
-        inputs.input_ids,
-        attention_mask=attention_mask,
-        max_new_tokens=
-        temperature=0.3,
+        inputs.input_ids,
+        attention_mask=attention_mask,
+        max_new_tokens=128,  # Set to 128 for faster output
+        temperature=0.3,  # Recommended for Mistral Nemo
         do_sample=True,
         pad_token_id=tokenizer.pad_token_id
     )
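For context, below is a minimal sketch of how the patched pieces fit together after this commit. It is an approximation, not the Space's actual app.py: the real file also builds the Gradio UI and uses the spaces GPU decorator, and the model_name, dtype, and decode step here are assumptions the diff does not show.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Assumption: the inline comment only implies a Mistral Nemo model; the exact checkpoint is not in the diff.
model_name = "mistralai/Mistral-Nemo-Instruct-2407"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# Give the tokenizer a pad_token_id distinct from eos_token_id so generate()
# can tell padding apart from end-of-sequence.
if tokenizer.pad_token_id is None or tokenizer.pad_token_id == tokenizer.eos_token_id:
    tokenizer.pad_token_id = tokenizer.eos_token_id + 1

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,  # assumption: dtype is not visible in the diff
    device_map="auto",
)

def mejorar_resolucion(input_text):
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs["attention_mask"],
        max_new_tokens=128,   # capped at 128 for faster responses
        temperature=0.3,      # low temperature recommended for Mistral Nemo
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)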