# Test2 / app.py
import gradio as gr
from transformers import pipeline

# Load a text-generation pipeline backed by the Phi-2 model
chat = pipeline("text-generation", model="microsoft/phi-2")

def chat_with_model(prompt):
    # Generate a completion and return only the generated text string
    response = chat(prompt, max_length=50)
    return response[0]["generated_text"]
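# Illustrative only: chat_with_model("Hello") returns a string that begins with
# the prompt followed by up to ~50 tokens of model continuation; the exact
# output varies between runs.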
# Create the Gradio interface (gr.Textbox replaces the removed gr.inputs namespace)
iface = gr.Interface(
    fn=chat_with_model,
    inputs=gr.Textbox(lines=2, placeholder="Type your message here..."),
    outputs="text",
    title="Chat with LLM",
    description="Type a message and chat with a language model.",
)
if __name__ == "__main__":
    iface.launch()
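# Note: launch() also accepts options such as share=True to expose a temporary
# public link when running locally; the default call is enough on Hugging Face Spaces.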