Spaces:
Sleeping
Sleeping
spedrox-sac
committed on
Commit
•
29bf986
1
Parent(s):
9a97c5a
Update app.py
Browse files
app.py
CHANGED
@@ -1,21 +1,35 @@
|
|
1 |
import streamlit as st
|
2 |
-
from transformers import pipeline
|
3 |
from langchain_core.output_parsers import StrOutputParser
|
4 |
-
|
5 |
-
|
|
|
|
|
|
|
6 |
parser = StrOutputParser()
|
|
|
7 |
# Streamlit app
|
8 |
-
st.title("Text Generation with Qwen Model")
|
9 |
|
10 |
# Text input from the user
|
11 |
-
user_input = st.
|
12 |
|
13 |
# Generate text when the button is clicked
|
14 |
if st.button("Generate"):
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
#
|
20 |
-
|
21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st
from transformers import pipeline, AutoTokenizer
from langchain_core.output_parsers import StrOutputParser

# Model checkpoint used for generation (small instruct model, CPU-friendly).
MODEL_NAME = "Qwen/Qwen2.5-0.5B-Instruct"


@st.cache_resource
def load_pipeline():
    """Build the text-generation pipeline once per server process.

    Streamlit re-executes the whole script on every widget interaction;
    without st.cache_resource the tokenizer and model weights would be
    re-loaded on every button click.
    """
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True)
    return pipeline(
        "text-generation",
        model=MODEL_NAME,
        tokenizer=tokenizer,
        device=-1,  # -1 = CPU
    )


pipe = load_pipeline()
parser = StrOutputParser()

# Streamlit app
st.title("Optimized Batch Text Generation with Qwen Model")

# Text input from the user (one prompt per line).
user_input = st.text_area(
    "Enter your messages (one per line):",
    "Who are you?\nWhat is your purpose?",
)

# Generate text when the button is clicked.
if st.button("Generate"):
    # Split input into individual, non-empty prompts.
    messages = [line.strip() for line in user_input.splitlines() if line.strip()]

    if not messages:
        st.warning("Please enter at least one message.")
    else:
        # BUG FIX: the text-generation pipeline expects raw strings and
        # tokenizes internally; passing pre-tokenized tensors via an
        # `input_ids=` keyword is not a supported call signature. Passing
        # the list of strings in a single call also gives genuine batched
        # processing instead of one pipeline invocation per message.
        outputs = pipe(messages, max_new_tokens=50)

        # Display the generated text for each input message.
        st.write("Generated Responses:")
        for i, (message, output) in enumerate(zip(messages, outputs), start=1):
            # For a list input, the pipeline returns one list of candidate
            # dicts per prompt; take the first (and only) candidate.
            generated_text = output[0]["generated_text"]
            result = parser.invoke(generated_text)
            st.write(f"Input {i}: {message}")
            st.write(f"Response {i}: {result}\n")