import gradio as gr
from transformers import pipeline, set_seed
# Create text generation pipeline with GPT-2
generator = pipeline('text-generation', model='gpt2')
def generate_text(prompt, temperature, max_length, instruction):
    set_seed(42)  # fixed seed so the same inputs give reproducible output
    # Prepend the instruction to the user message so it steers the completion
    full_prompt = f"{instruction}\n{prompt}"
    result = generator(full_prompt, max_length=int(max_length), num_return_sequences=1,
                       temperature=temperature, do_sample=True)  # sampling must be enabled for temperature to take effect
    return result[0]['generated_text']
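# Illustrative only (assumes the GPT-2 weights have downloaded); uncomment to compare
# a low-temperature (more deterministic) and a high-temperature (more random) completion
# before launching the interface. The prompt and values here are arbitrary examples.
# print(generate_text("Hello, I am", temperature=0.2, max_length=30, instruction="This is a chat with AI."))
# print(generate_text("Hello, I am", temperature=0.9, max_length=30, instruction="This is a chat with AI."))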
# Define Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=[gr.Textbox(lines=3, label="Your Message"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.5, label="Temperature"),
            gr.Slider(minimum=10, maximum=200, value=100, step=1, label="Max Length"),
            gr.Textbox(value="This is a chat with AI.", lines=2, label="AI Instruction")],
    outputs=gr.Textbox(label="AI Response"),
    title="GPT-2 Chat",
    description="This is a chat interface with OpenAI's GPT-2 model. You can adjust the temperature and the maximum length of the response. The temperature controls the randomness of the model's output: higher values (closer to 1) make the output more random, while lower values make it more deterministic. The max length controls the length of the model's response.",
)
iface.launch()
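# Note: when running locally, launching with iface.launch(share=True) instead would
# also create a temporary public URL for the interface.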