Rainsilves committed
Commit 090ac93 · 1 Parent(s): f2118ef

fundamental rewrite to make it WAY MORE AWESOME

Files changed (2):
  1. app.py +41 -7
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,6 +1,5 @@
-from simplet5 import SimpleT5
 import streamlit as st
-import pytorch_lightning as pl
+import csv
 
 
 st.set_page_config(page_title="Reassuring Parables")
@@ -11,8 +10,10 @@ st.caption("Find me on Linkedin: https://www.linkedin.com/in/allen-roush-2772101
 st.image("https://imgs.xkcd.com/comics/reassuring.png")
 st.caption("From https://xkcd.com/1263/")
 
+
 # instantiate
-model = SimpleT5()
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
 
 # load (supports t5, mt5, byT5 models)
 #model.from_pretrained("t5","t5-base")
@@ -79,16 +80,49 @@ def train_model():
 
 # load trained T5 model
 
+
+
 with st.spinner("Please wait while the model loads:"):
-    model = SimpleT5()
-    model.load_model("t5","Hellisotherpeople/T5_Reassuring_Parables")
+    tokenizer = AutoTokenizer.from_pretrained("Hellisotherpeople/T5_Reassuring_Parables")
+    model = AutoModelForSeq2SeqLM.from_pretrained("Hellisotherpeople/T5_Reassuring_Parables")
+
+form = st.sidebar.form("choose_settings")
 
+form.header("Main Settings")
+
+number_of_parables = form.number_input("Select how many reassuring parables you want to generate", value = 100, max_value = 1000)
+max_length_of_parable = form.number_input("What's the max length of the parable?", value = 20, max_value = 128)
+min_length_of_parable = form.number_input("What's the min length of the parable?", value = 0, max_value = max_length_of_parable)
+top_k = form.number_input("What value of K should we use for Top-K sampling? Set to zero to disable", value = 50)
+form.caption("In Top-K sampling, the K most likely next words are filtered and the probability mass is redistributed among only those K next words. ")
+top_p = form.number_input("What value of P should we use for Top-p sampling? Set to zero to disable", value = 0.95, max_value = 1.0, min_value = 0.0)
+form.caption("Top-p sampling chooses from the smallest possible set of words whose cumulative probability exceeds the probability p. The probability mass is then redistributed among this set of words.")
+temperature = form.number_input("How spicy/interesting do we want our models output to be", value = 1.05, min_value = 0.0)
+form.caption("Setting this higher decreases the likelihood of high probability words and increases the likelihood of low probability (and presumably more interesting) words")
+form.caption("For more details on what these settings mean, see here: https://huggingface.co/blog/how-to-generate")
+form.form_submit_button("Generate some Reassuring Parables!")
 
-number_of_parables = st.sidebar.number_input("Select how many reassuring parables you want to generate", value = 100)
 #seed_value = st.sidebar.number_input("Select a seed value - change this to get different output", 42) ## Doesn't work :(
 
+input_ids = tokenizer.encode("Computers will never", return_tensors='pt')
+
+sample_outputs = model.generate(
+    input_ids,
+    do_sample=True,
+    max_length=max_length_of_parable,
+    min_length=min_length_of_parable,
+    top_k=top_k,
+    top_p=top_p,
+    num_return_sequences=number_of_parables,
+    temperature=temperature
+)
+
+
 
 with st.spinner("Generating Reassuring Parables"):
     #pl.seed_everything(seed_value)
-    st.write(model.predict("Computers will never", num_return_sequences = number_of_parables, num_beams = number_of_parables))
+    list_of_parables = []
+    for i, sample_output in enumerate(sample_outputs):
+        list_of_parables.append(tokenizer.decode(sample_output, skip_special_tokens=True))
+    st.write(list_of_parables)
 
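For reference, the core of the new generation path can be exercised outside Streamlit. This is a minimal sketch, not part of the commit: it mirrors the form defaults from the diff (max_length 20, top_k 50, top_p 0.95, temperature 1.05) and assumes PyTorch is installed as the transformers backend and that the Hub model is reachable.

# Minimal standalone sketch of the new transformers-based generation path.
# Assumes torch is installed and "Hellisotherpeople/T5_Reassuring_Parables" can be downloaded.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Hellisotherpeople/T5_Reassuring_Parables")
model = AutoModelForSeq2SeqLM.from_pretrained("Hellisotherpeople/T5_Reassuring_Parables")

# Every parable starts from the same prompt; sampling provides the variety.
input_ids = tokenizer.encode("Computers will never", return_tensors="pt")

sample_outputs = model.generate(
    input_ids,
    do_sample=True,           # sample instead of the old SimpleT5 beam-search predict()
    max_length=20,            # app default for max_length_of_parable
    min_length=0,             # app default for min_length_of_parable
    top_k=50,                 # app default for Top-K sampling
    top_p=0.95,               # app default for Top-p (nucleus) sampling
    temperature=1.05,         # app default; slightly flattens the distribution
    num_return_sequences=5,   # the app defaults to 100; 5 keeps the sketch quick
)

parables = [tokenizer.decode(out, skip_special_tokens=True) for out in sample_outputs]
for parable in parables:
    print(parable)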
 
requirements.txt CHANGED
@@ -1,2 +1,2 @@
 streamlit==1.2.0
-simplet5
+transformers
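A note on dependencies: installing transformers by itself does not pull in PyTorch, which AutoModelForSeq2SeqLM needs at load time. If the Space's base image does not already provide torch (an assumption about the runtime, not something this commit states), requirements.txt would also need an entry for it, e.g.:

streamlit==1.2.0
transformers
torch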