# Hugging Face Space: GPT-2 Ukrainian Q&A (Space status banner: "Sleeping")
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr

# Hub repo id of the fine-tuned model.
model_path = "stas-l/Ukr-Lit-SP"

# Tokenizer comes from the base Ukrainian GPT-2; weights from the fine-tune.
tokenizer = AutoTokenizer.from_pretrained("malteos/gpt2-uk")
model = AutoModelForCausalLM.from_pretrained(model_path)

# Build the text-generation pipeline once at startup and reuse it per request.
generation_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Function for Q&A style response
def question_answer(user_input: str) -> str:
    """Generate a model reply for *user_input*.

    The raw user text is passed directly as the prompt; the pipeline's
    output contains the prompt followed by the generated continuation.

    Args:
        user_input: Question text typed into the Gradio textbox.

    Returns:
        The generated text (prompt included), stripped of surrounding
        whitespace.
    """
    result = generation_pipeline(
        user_input,
        max_length=120,  # cap on total tokens (prompt + continuation)
        num_return_sequences=1,
        # GPT-2 has no dedicated pad token; reuse EOS to silence the warning.
        pad_token_id=tokenizer.eos_token_id,
    )
    return result[0]["generated_text"].strip()
# Gradio Interface: single textbox in, single textbox out.
iface = gr.Interface(
    fn=question_answer,
    inputs="text",
    outputs="text",
    title="GPT-2 Ukrainian Q&A",
    description="Задайте будь-яке питання, і модель відповість.",
)

# Launch interface (blocks and serves the app).
iface.launch()