from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import gradio as gr

MODEL_NAME = "allenai/cosmo-xl"

# Load the tokenizer and seq2seq model once at startup.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
def generate_text(situation, instructions, prompt):
    """
    Generate text using the specified model and inputs.

    Args:
        situation: A short description of the context or situation.
        instructions: Specific instructions or constraints for text generation.
        prompt: The initial text prompt for the model to start from.

    Returns:
        The generated text.
    """
    # cosmo-xl expects a single input sequence with the situation narrative,
    # role instruction, and dialogue joined by "<sep>" (per the model card),
    # not a batch of three separate sequences, so join the non-empty fields.
    input_text = " <sep> ".join(part for part in (situation, instructions, prompt) if part)
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(**inputs)
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text
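# Example usage outside the Gradio UI (a sketch with made-up inputs; any
# situation/instruction/prompt strings are handled the same way):
#
#     reply = generate_text(
#         "Cosmo is chatting with a friend at a coffee shop.",
#         "You are Cosmo and you are talking to your friend.",
#         "Hey, how has your week been?",
#     )
#     print(reply)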
interface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Situation"),
        gr.Textbox(label="Instructions"),
        gr.Textbox(label="Prompt"),
    ],
    outputs="textbox",
    theme="huggingface",
    title="Cosmopolitan Conversationalist",
    description="Generate creative text with context and instructions!",
)
# Note: launch()'s server_name expects a host/IP to bind to (e.g. "0.0.0.0"),
# not a display name; the app title is already set via the Interface above.
interface.launch()