"""Streamlit app: batch text generation with the Qwen2.5-0.5B-Instruct model.

Reads one prompt per line from a text area, runs all prompts through a
Hugging Face ``transformers`` text-generation pipeline in a single batched
call, and displays each generated response.
"""
import streamlit as st
from transformers import pipeline
from langchain_core.output_parsers import StrOutputParser

model_name = "Qwen/Qwen2.5-0.5B-Instruct"


@st.cache_resource
def _load_pipeline():
    """Load the text-generation pipeline once and reuse it across reruns.

    Without caching, Streamlit re-executes the whole script on every
    interaction and would re-download / re-instantiate the model each time.
    device=-1 forces CPU inference.
    """
    return pipeline("text-generation", model=model_name, device=-1)


pipe = _load_pipeline()
parser = StrOutputParser()

# Streamlit app UI
st.title("Optimized Batch Text Generation with Qwen Model")

# Text input from the user: one prompt per line.
user_input = st.text_area(
    "Enter your messages (one per line):", "Who are you?\nWhat is your purpose?"
)

# Generate text when the button is clicked.
if st.button("Generate"):
    # Split input into individual prompts, dropping blank lines.
    messages = [line.strip() for line in user_input.splitlines() if line.strip()]

    if not messages:
        st.warning("Please enter at least one message.")
    else:
        # Process all prompts in one batched pipeline call.
        outputs = pipe(messages, max_new_tokens=50)  # Adjust max_new_tokens as needed

        st.write("Generated Responses:")
        for i, (prompt, output) in enumerate(zip(messages, outputs), start=1):
            # BUGFIX: with a list of inputs the pipeline returns one *list*
            # of candidate dicts per prompt (List[List[dict]]), so the
            # original ``output['generated_text']`` raised TypeError.
            # Take the first (and only) candidate; fall back to the dict
            # form for single-input shapes.
            candidate = output[0] if isinstance(output, list) else output
            result = parser.invoke(candidate["generated_text"])
            st.write(f"Input {i}: {prompt}")
            st.write(f"Response {i}: {result}\n")