Srfacehug committed on
Commit
10e54ac
Β·
verified Β·
1 Parent(s): 7f6f226

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -4
app.py CHANGED
@@ -1,7 +1,45 @@
1
  import gradio as gr
 
 
2
 
def greet(name):
    """Return a cheerful greeting for *name*."""
    return f"Hello {name}!!"
 
# Wire the greeter into a minimal text-in / text-out UI and start serving.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ import torch
4
 
 
 
# Load Falcon-7B-Instruct once at module import.
# NOTE(review): trust_remote_code=True executes modelling code shipped with
# the checkpoint — acceptable only because the tiiuae repo is trusted.
checkpoint = "tiiuae/falcon-7b-instruct"
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    device_map="auto",            # place layers on available device(s)
    torch_dtype=torch.bfloat16,   # half the memory of fp32
    low_cpu_mem_usage=True,
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
14
+
def generate_text(input_text):
    """Generate a Falcon-7B-Instruct completion for *input_text*.

    Parameters
    ----------
    input_text : str
        The prompt to complete.

    Returns
    -------
    str
        The sampled continuation with the leading prompt echo removed.
    """
    # Tokenize via __call__ so we get the model's real integer attention
    # mask; the original torch.ones(...) produced a float32 mask.
    encoded = tokenizer(input_text, return_tensors="pt")
    # device_map="auto" may have placed the model off-CPU — inputs must
    # live on the same device as the embedding layer.
    input_ids = encoded["input_ids"].to(model.device)
    attention_mask = encoded["attention_mask"].to(model.device)

    output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=200,               # includes the prompt tokens
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
        # Falcon has no pad token; reuse EOS to silence the generate() warning.
        pad_token_id=tokenizer.eos_token_id,
    )

    output_text = tokenizer.decode(output[0], skip_special_tokens=True)

    # Strip only the *leading* prompt echo. The previous
    # output_text.replace(input_text, "") also deleted any later
    # repetition of the prompt inside the generated text.
    if output_text.startswith(input_text):
        output_text = output_text[len(input_text):]
    return output_text
36
+
37
+
38
+ text_generation_interface = gr.Interface(
39
+ fn=generate_text,
40
+ inputs=[
41
+ gr.inputs.Textbox(label="Input Text"),
42
+ ],
43
+ outputs=gr.inputs.Textbox(label="Generated Text"),
44
+ title="Falcon-7B Instruct",
45
+ ).launch()