Spaces:
Sleeping
Sleeping
File size: 1,046 Bytes
9ad0b9e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr
# Hub repo id of the fine-tuned Ukrainian-literature GPT-2 checkpoint.
model_path = "stas-l/Ukr-Lit-SP"
# Load tokenizer and model.
# NOTE(review): the tokenizer comes from the *base* model repo ("malteos/gpt2-uk")
# while the weights come from the fine-tuned repo — presumably the fine-tune kept
# the base vocabulary unchanged; verify the fine-tuned repo ships no tokenizer of its own.
tokenizer = AutoTokenizer.from_pretrained("malteos/gpt2-uk")
model = AutoModelForCausalLM.from_pretrained(model_path)
# Build a text-generation pipeline that wraps the model + tokenizer pair.
generation_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Function for Q&A style response
def question_answer(user_input):
    """Generate a model answer for ``user_input`` and return it as a string.

    The raw user text is used directly as the generation prompt.

    Args:
        user_input: Question text typed into the Gradio textbox.

    Returns:
        The generated continuation with the echoed prompt and surrounding
        whitespace removed; an empty string for blank input.
    """
    prompt = user_input.strip()
    # Guard: the pipeline has nothing useful to do with an empty prompt.
    if not prompt:
        return ""
    result = generation_pipeline(
        prompt,
        # max_new_tokens bounds only the *generated* part; the original
        # max_length=120 counted the prompt too, so long questions left
        # almost no budget for the answer.
        max_new_tokens=120,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    text = result[0]["generated_text"]
    # The HF text-generation pipeline echoes the prompt at the start of
    # generated_text; drop it so the UI shows only the answer.
    if text.startswith(prompt):
        text = text[len(prompt):]
    return text.strip()
# Describe the web UI declaratively, then hand the whole config to Gradio.
_ui_config = {
    "fn": question_answer,
    "inputs": "text",
    "outputs": "text",
    "title": "GPT-2 Ukrainian Q&A",
    "description": "Задайте будь-яке питання, і модель відповість.",
}
iface = gr.Interface(**_ui_config)

# Start the local Gradio server and block until it is stopped.
iface.launch()
|