SAMBOOM committed
Commit 34c9838
1 Parent(s): a894ebe

Update app.py

Files changed (1)
  1. app.py +37 -40
app.py CHANGED
@@ -1,47 +1,44 @@
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
+tokenizer = AutoTokenizer.from_pretrained("gpt2-large")
 @st.cache_data
 def load_model(model_name):
-    model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
-    return model
-
-model = load_model("mistralai/Mixtral-8x7B-Instruct-v0.1")
-
-def infer(input_ids, max_length, temperature, top_k, top_p):
-    input_ids = torch.tensor(input_ids).unsqueeze(0)
-    output_sequences = model.generate(
-        input_ids=input_ids,
-        max_length=max_length,
-        temperature=temperature,
-        top_k=top_k,
-        top_p=top_p,
-        do_sample=True,
-        num_return_sequences=1
-    )
-    return output_sequences
-
-default_value = "Ask me anything!"
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+    return model
+
+model = load_model("gpt2-large")
+
+def infer(sent, max_length, temperature, top_k, top_p):
+    input_ids = tokenizer.encode(sent, return_tensors="pt")
+    output_sequences = model.generate(
+        input_ids=input_ids,
+        max_length=max_length,
+        temperature=temperature,
+        top_k=top_k,
+        top_p=top_p,
+        do_sample=True,
+        num_return_sequences=1
+    )
+    return output_sequences
+
+default_value = "You: Ask me anything!"
 
 #prompts
-st.title("Chatbot")
-
-sent = st.text_area("Message", default_value, height = 100)
-max_length = st.sidebar.slider("Max Length", min_value = 10, max_value=30)
-temperature = st.sidebar.slider("Temperature", value = 1.0, min_value = 0.0, max_value=1.0, step=0.05)
-top_k = st.sidebar.slider("Top-k", min_value = 0, max_value=5, value = 0)
-top_p = st.sidebar.slider("Top-p", min_value = 0.0, max_value=1.0, step = 0.05, value = 0.9)
-
-encoded_prompt = tokenizer.encode(sent, add_special_tokens=False, return_tensors="pt")
-if encoded_prompt.size()[-1] == 0:
-    input_ids = None
-else:
-    input_ids = encoded_prompt
-
-output_sequences = infer(input_ids, max_length, temperature, top_k, top_p)
-
-for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
-    generated_sequences = generated_sequence.tolist()
-    text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
-    st.write(text)
+st.title("Chat with GPT-2 💬")
+st.write("GPT-2 is a large transformer-based language model with 1.5 billion parameters. It is trained to predict the next word in a sentence, given all of the previous words. This makes it great for text generation and for answering questions about the text it's given.")
+
+messages = [{"role": "system", "content": "You are a helpful assistant."}]
+
+user_input = st.text_input("You:", default_value)
+if user_input:
+    messages.append({"role": "user", "content": user_input})
+
+    output_sequences = infer(user_input, max_length=100, temperature=0.7, top_k=40, top_p=0.9)
+    generated_sequence = output_sequences[0].tolist()
+    generated_text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
+
+    messages.append({"role": "assistant", "content": generated_text})
+
+for message in messages:
+    st.write(f"{message['role']}: {message['content']}")