htigenai committed on
Commit
33b03d6
·
verified ·
1 Parent(s): 0d2045c

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +35 -0
app.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import gradio as gr
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM
4
+ import torch
5
+
6
+ # Load model
7
+ model_id = "htigenai/finetune_test"
8
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
9
+ model = AutoModelForCausalLM.from_pretrained(
10
+ model_id,
11
+ torch_dtype=torch.float16,
12
+ device_map="auto"
13
+ )
14
+
15
+ def generate_text(prompt):
16
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
17
+ outputs = model.generate(
18
+ **inputs,
19
+ max_new_tokens=100,
20
+ temperature=0.7,
21
+ top_p=0.95,
22
+ do_sample=True
23
+ )
24
+ return tokenizer.decode(outputs[0], skip_special_tokens=True)
25
+
26
+ # Create the interface
27
+ iface = gr.Interface(
28
+ fn=generate_text,
29
+ inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
30
+ outputs=gr.Textbox(),
31
+ title="Text Generation",
32
+ description="Generate text using the fine-tuned model"
33
+ )
34
+
35
+ iface.launch()