pr0mila committed on
Commit 91cfacf · 1 Parent(s): 425c657

Update app.py

Files changed (1)
app.py +10 -11
app.py CHANGED
@@ -23,20 +23,19 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 
-tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
+from transformers import GPTJForCausalLM
+import torch
 
-model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")
+model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", torch_dtype=torch.float16, low_cpu_mem_usage=True)
+tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
 
 def get_result_with_bloom(text):
-    result_length = 200
-    inputs1 = tokenizer(text, return_tensors="pt")
-    output1 = tokenizer.decode(model.generate(inputs1["input_ids"],
-                                              max_length=result_length,
-                                              num_beams=2,
-                                              no_repeat_ngram_size=2,
-                                              early_stopping=True
-                                              )[0])
-    return output1
+    context = text
+
+    input_ids = tokenizer(context, return_tensors="pt").input_ids
+    gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100)
+    gen_text = tokenizer.batch_decode(gen_tokens)[0]
+    return gen_text
 
 
 
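The hunk keeps import gradio as gr, but the Interface wiring presumably sits outside the changed lines, so it does not appear in the diff. For reference, a minimal sketch of how get_result_with_bloom might be exposed from a Space's app.py; the wiring below is an assumption, not part of this commit:

# Hypothetical wiring, assuming a plain text-in / text-out demo.
# The real gr.Interface call in app.py sits outside this hunk.
demo = gr.Interface(fn=get_result_with_bloom, inputs="text", outputs="text")
demo.launch()

Two things worth noting about the new code path: revision="float16" together with torch_dtype=torch.float16 and low_cpu_mem_usage=True loads the half-precision GPT-J-6B checkpoint (roughly 12 GB of weights) instead of the full fp32 one, and decoding switches from beam search (num_beams=2, no_repeat_ngram_size=2, early_stopping=True) to sampling (do_sample=True, temperature=0.9), so repeated calls with the same prompt can return different text.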