Update app.py
app.py CHANGED
@@ -1,47 +1,44 @@
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("gpt2-large")
 @st.cache_data
 def load_model(model_name):
-model = load_model("
-def infer(
-default_value = "Ask me anything!"
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+    return model
+
+model = load_model("gpt2-large")
+
+def infer(sent, max_length, temperature, top_k, top_p):
+    input_ids = tokenizer.encode(sent, return_tensors="pt")
+    output_sequences = model.generate(
+        input_ids=input_ids,
+        max_length=max_length,
+        temperature=temperature,
+        top_k=top_k,
+        top_p=top_p,
+        do_sample=True,
+        num_return_sequences=1
+    )
+    return output_sequences
+
+default_value = "You: Ask me anything!"
 
 #prompts
-st.title("
-generated_sequences = generated_sequence.tolist()
-text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
-st.write(text)
+st.title("Chat with GPT-2 💬")
+st.write("GPT-2 is a large transformer-based language model with 1.5 billion parameters. It is trained to predict the next word in a sentence, given all of the previous words. This makes it great for text generation and for answering questions about the text it's given.")
+
+messages = [{"role": "system", "content": "You are a helpful assistant."}]
+
+user_input = st.text_input("You:", default_value)
+if user_input:
+    messages.append({"role": "user", "content": user_input})
+
+    output_sequences = infer(user_input, max_length=100, temperature=0.7, top_k=40, top_p=0.9)
+    generated_sequence = output_sequences[0].tolist()
+    generated_text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
+
+    messages.append({"role": "assistant", "content": generated_text})
+
+for message in messages:
+    st.write(f"{message['role']}: {message['content']}")
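Two Streamlit behaviors are worth keeping in mind when reading the new version: the script reruns top to bottom on every interaction, so messages is rebuilt each time and only the latest exchange is ever shown, and @st.cache_data hashes and serializes the cached return value, which Streamlit's docs steer away from for ML models in favor of @st.cache_resource. Below is a minimal sketch of the same app with history that persists across reruns, assuming gpt2-large and the commit's sampling parameters; it is an illustration of those two APIs, not part of this commit.

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# st.cache_resource keeps one global copy of the model without trying to
# serialize it, which is what Streamlit recommends for ML models.
@st.cache_resource
def load_model(model_name):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model

tokenizer, model = load_model("gpt2-large")

st.title("Chat with GPT-2 💬")

# st.session_state survives reruns, so the history accumulates instead of
# being reset to the system message on every interaction.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "system", "content": "You are a helpful assistant."}]

user_input = st.text_input("You:")
if user_input:
    st.session_state.messages.append({"role": "user", "content": user_input})
    input_ids = tokenizer.encode(user_input, return_tensors="pt")
    output_sequences = model.generate(
        input_ids=input_ids,
        max_length=100,
        temperature=0.7,
        top_k=40,
        top_p=0.9,
        do_sample=True,
        num_return_sequences=1,
    )
    generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
    st.session_state.messages.append({"role": "assistant", "content": generated_text})

for message in st.session_state.messages:
    st.write(f"{message['role']}: {message['content']}")

Like the committed version, this variant launches with "streamlit run app.py". Note that base GPT-2 is a plain next-token predictor, so the "system" entry shapes the display only, not the model's behavior.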