stas-l committed on
Commit
9ad0b9e
1 Parent(s): 2248ecf

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -0
app.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Gradio demo: Q&A-style text generation with a Ukrainian GPT-2 model."""

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr

# Hub path of the fine-tuned model weights.
model_path = "stas-l/Ukr-Lit-SP"

# The fine-tune reuses the base model's tokenizer, so it is loaded from the
# base checkpoint while the causal-LM weights come from the fine-tuned repo.
tokenizer = AutoTokenizer.from_pretrained("malteos/gpt2-uk")
model = AutoModelForCausalLM.from_pretrained(model_path)

# One shared text-generation pipeline, used by the request handler below.
generation_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
13
+
14
def question_answer(user_input):
    """Generate a model continuation for *user_input* and return only the new text.

    Args:
        user_input: The question/prompt typed by the user in the UI.

    Returns:
        The generated continuation as a whitespace-stripped string.
    """
    result = generation_pipeline(
        user_input,
        max_length=120,  # total token budget, prompt tokens included
        num_return_sequences=1,
        # GPT-2 defines no pad token; reusing EOS silences the pipeline warning.
        pad_token_id=tokenizer.eos_token_id,
        # Fix: by default "generated_text" echoes the prompt back at the start,
        # so the "answer" repeated the user's question. Return only new text.
        return_full_text=False,
    )
    return result[0]["generated_text"].strip()
25
+
26
# Assemble the web UI: a single text box in, a single text box out.
ui_title = "GPT-2 Ukrainian Q&A"
ui_description = "Задайте будь-яке питання, і модель відповість."

iface = gr.Interface(
    fn=question_answer,
    inputs="text",
    outputs="text",
    title=ui_title,
    description=ui_description,
)

# Start the Gradio server (blocks until shut down).
iface.launch()