Update app.py
app.py
CHANGED
@@ -23,20 +23,19 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 
-
+from transformers import GPTJForCausalLM
+import torch
 
-model =
+model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", torch_dtype=torch.float16, low_cpu_mem_usage=True)
+tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
 
 def get_result_with_bloom(text):
-
-
-
-
-
-
-        early_stopping=True
-    )[0])
-    return output1
+    context = text
+
+    input_ids = tokenizer(context, return_tensors="pt").input_ids
+    gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100,)
+    gen_text = tokenizer.batch_decode(gen_tokens)[0]
+    return gen_text
 
 
 
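For orientation, below is a minimal sketch of what app.py looks like after this change. The model loading and the body of get_result_with_bloom come from the added lines above; the gr.Interface wiring, the launch() call, and the optional GPU placement are assumptions, since they fall outside this hunk. Note the function keeps its BLOOM-era name even though it now runs GPT-J.

import torch
import gradio as gr
from transformers import AutoTokenizer, GPTJForCausalLM

# GPT-J 6B loaded in float16, as in the added lines (roughly halves memory vs. float32).
model = GPTJForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6B",
    revision="float16",
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
# In practice the model is usually moved to a GPU for float16 inference,
# e.g. model.to("cuda"); that step is not shown in this hunk.

def get_result_with_bloom(text):
    # Tokenize the prompt and sample up to 100 tokens, as in the diff.
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100)
    return tokenizer.batch_decode(gen_tokens)[0]

# Assumed Gradio front end: one text box in, generated text out.
demo = gr.Interface(fn=get_result_with_bloom, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()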