Spaces:
Sleeping
Sleeping
injilashah
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
import os
|
|
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
3 |
import gradio as gr
|
4 |
hf_token = os.getenv("HF_Token")
|
@@ -17,11 +18,11 @@ def Sentence_Commpletion(model_name, input):
|
|
17 |
if model_name == "Bloom":
|
18 |
tokenizer, model = b_tokenizer, b_model
|
19 |
inputs = tokenizer(input, return_tensors="pt")
|
20 |
-
outputs = model.generate(inputs.
|
21 |
elif model_name == "Gemma":
|
22 |
tokenizer, model = g_tokenizer, g_model
|
23 |
inputs = Tokenizer(input, return_tensors="pt")
|
24 |
-
outputs = Model.generate(**
|
25 |
return tokenizer.decode(outputs[0])
|
26 |
|
27 |
|
|
|
1 |
import os
|
2 |
+
import torch
|
3 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
4 |
import gradio as gr
|
5 |
hf_token = os.getenv("HF_Token")
|
|
|
18 |
if model_name == "Bloom":
|
19 |
tokenizer, model = b_tokenizer, b_model
|
20 |
inputs = tokenizer(input, return_tensors="pt")
|
21 |
+
outputs = model.generate(inputs.input_ids, max_length=30, num_return_sequences=1)
|
22 |
elif model_name == "Gemma":
|
23 |
tokenizer, model = g_tokenizer, g_model
|
24 |
inputs = Tokenizer(input, return_tensors="pt")
|
25 |
+
outputs = Model.generate(**input_ids, max_new_tokens=20)
|
26 |
return tokenizer.decode(outputs[0])
|
27 |
|
28 |
|