sombochea committed on
Commit
4d214ec
1 Parent(s): d427d25
Files changed (1)
  1. app.py +22 -4
app.py CHANGED
@@ -1,7 +1,25 @@
 
 
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
  import gradio as gr

- def greet(name):
-     return "Hello " + name + "!!"
+ # Load model and tokenizer (using CPU for broader accessibility)
+ model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", torch_dtype=torch.float32, device_map="cpu", trust_remote_code=True)
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)

- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+ def generate_text(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
+     outputs = model.generate(**inputs, max_length=200)
+     text = tokenizer.batch_decode(outputs)[0]
+     return text
+
+ # Create Gradio interface
+ iface = gr.Interface(
+     fn=generate_text,
+     inputs=[gr.Textbox(lines=5, label="Enter your prompt")],
+     outputs="text",
+     title="PHI-2 Text Generator",
+     description="Generate text using the PHI-2 generative language model",
+ )
+
+ # Launch the interface
+ iface.launch()
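
For reference, a minimal sketch of how the new text-generation endpoint could be exercised once this app is running, using gradio_client; the local URL and the "/predict" api_name are Gradio defaults and are assumptions here, not part of this commit:

    # Query the running PHI-2 Gradio app programmatically.
    from gradio_client import Client

    client = Client("http://127.0.0.1:7860/")   # assumed default local address; use the Space URL if hosted
    result = client.predict(
        "Write a short poem about the ocean.",  # value for the "Enter your prompt" textbox
        api_name="/predict",                    # default endpoint name for a single gr.Interface
    )
    print(result)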