Stefan Dumitrescu committed
Commit 9506fdb
1 Parent(s): 519ca88
Files changed (2)
  1. app.py +117 -51
  2. requirements.txt +1 -0
app.py CHANGED
@@ -1,17 +1,14 @@
- import transformers
  import streamlit as st
-
  from transformers import AutoTokenizer, AutoModelForCausalLM

- st.write("Streamlit version:", st.__version__)
-
  st.set_page_config(
      page_title="Romanian Text Generator",
      page_icon="🇷🇴",
      layout="wide"
  )

- st.write("Type your text here and press Ctrl+Enter to generate the next sequence:")
+ #############################################
+ # Python stuff here

  model_list = [
      "dumitrescustefan/gpt-neo-romanian-780m",
@@ -20,68 +17,137 @@ model_list = [
      "readerbench/RoGPT2-large"
  ]

- st.sidebar.header("Select model")
- model_checkpoint = st.sidebar.selectbox("Select model", model_list)
-
-
- t1, t2, t3, t4, t5, t6 = st.tabs(["Greedy", "Beam-search", "Sampling", "Top-k Sampling", "Top-p Sampling", "Typical Sampling"])
- t1.write("Greedy")
- t2.write("Greedy2")
- t3.write("Greedy3")
- t4.write("Greedy4")
-
+ def greedy_search(model, input_ids, attention_mask, no_repeat_ngram_size, max_length):
+     return model.generate(
+         input_ids=input_ids,
+         attention_mask=attention_mask,
+         no_repeat_ngram_size=no_repeat_ngram_size,
+         max_length=max_length
+     )

+ def beam_search(model, input_ids, attention_mask, no_repeat_ngram_size, max_length, num_beams):
+     return model.generate(
+         input_ids=input_ids,
+         attention_mask=attention_mask,
+         no_repeat_ngram_size=no_repeat_ngram_size,
+         max_length=max_length,
+         num_beams=num_beams,
+         early_stopping=True
+     )

- st.sidebar.header("Select generation parameters")
- max_length = t5.slider("Max Length", value=20, min_value=10, max_value=200)
- temperature = st.sidebar.slider("Temperature", value=1.0, min_value=0.0, max_value=1.0, step=0.05)
- top_k = st.sidebar.slider("Top-k", min_value=0, max_value=15, step=1, value=0)
- top_p = st.sidebar.slider("Top-p", min_value=0.0, max_value=1.0, step=0.05, value=0.9)
+ def sampling(model, input_ids, attention_mask, no_repeat_ngram_size, max_length, temperature, top_k, top_p):
+     return model.generate(
+         input_ids=input_ids,
+         attention_mask=attention_mask,
+         no_repeat_ngram_size=no_repeat_ngram_size,
+         max_length=max_length,
+         do_sample=True,
+         temperature=temperature,
+         top_k=top_k,
+         top_p=top_p
+     )

- text_element = st.text_input('Text:', 'Acesta este un exemplu,')
+ def typical_sampling(model, input_ids, attention_mask, no_repeat_ngram_size, max_length, temperature, typical_p):
+     return model.generate(
+         input_ids=input_ids,
+         attention_mask=attention_mask,
+         no_repeat_ngram_size=no_repeat_ngram_size,
+         max_length=max_length,
+         do_sample=True,
+         temperature=temperature,
+         typical_p=typical_p,
+         top_k=0
+     )

  @st.cache(allow_output_mutation=True)
  def setModel(model_checkpoint):
      model = AutoModelForCausalLM.from_pretrained(model_checkpoint)
      tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
      return model, tokenizer
+ #############################################

- def infer(model, tokenizer, text, max_length, temperature, top_k, top_p):
-     encoded_prompt = tokenizer(text, add_special_tokens=False, return_tensors="pt")
+ col1, _, col2 = st.columns([10, 1, 16])

-     output_sequences = model.generate(
-         input_ids=encoded_prompt.input_ids,
-         max_length=max_length,
-         temperature=temperature,
-         top_k=top_k,
-         top_p=top_p,
-         do_sample=True,
-         num_return_sequences=1
-     )
+ with col1:
+     st.write("Step 1: Select model")

-     return output_sequences
+     model_checkpoint = st.selectbox("Select model", model_list)

- #model, tokenizer = setModel(model_checkpoint)
- #output_sequences = infer(model, tokenizer, text_element, max_length, temperature, top_k, top_p)
- """
- for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
-     print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")
-     generated_sequences = generated_sequence.tolist()
+     st.write("Step 2: Adjust specific text generation parameters")

-     # Decode text
-     text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
+     tab_greedy, tab_beamsearch, tab_sampling, tab_typical = st.tabs(["Greedy", "Beam-search", "Sampling", "Typical Sampling"])

-     # Remove all text after the stop token
-     # text = text[: text.find(args.stop_token) if args.stop_token else None]
+     with tab_greedy:
+         st.write("as")

-     # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
-     total_sequence = (
-         text_element + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
-     )
+     with tab_beamsearch:
+         num_beams = st.slider("Num beams", min_value=1, max_value=30, step=5, value=5)
+
+     with tab_sampling:
+         top_p = st.slider("Top-p", min_value=0.0, max_value=1.0, step=0.05, value=0.9)
+         top_k = st.slider("Top-k", min_value=0, max_value=100, step=10, value=0)
+
+     with tab_typical:
+         typical_p = st.slider("Typical-p", min_value=0., max_value=1., step=.10, value=1.0)
+
+     st.markdown("""---""")
+
+     st.write("Step 3: Adjust common text generation parameters")

-     generated_sequences.append(total_sequence)
-     print(total_sequence)
+     no_repeat_ngrams = st.slider("No repeat n-grams", value=2, min_value=0, max_value=3)
+     temperature = st.slider("Temperature", value=1.0, min_value=0.0, max_value=1.0, step=0.05)
+     max_length = st.slider("Max Length", value=20, min_value=10, max_value=200)

-     st.write(generated_sequences[-1], text_element)

- """
+ with col2:
+     with st.container():
+         button_greedy = st.button("Greedy")
+         button_beam_search = st.button("Beam-search")
+         button_sampling = st.button("Sampling")
+         button_typical = st.button("Typical sampling")
+
+
+ @st.cache(allow_output_mutation=True)
+ def setModel(model_checkpoint):
+     model = AutoModelForCausalLM.from_pretrained(model_checkpoint)
+     tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
+     return model, tokenizer
+
+ #####################################################
+ # run-time
+
+ if 'text' not in st.session_state:
+     st.session_state['text'] = 'Acesta este un exemplu de text generat de un model de limbă.'
+
+ details = ""
+
+ if button_greedy:
+     model, tokenizer = setModel(model_checkpoint)
+     tokenized_text = tokenizer(st.session_state['text'], add_special_tokens=False, return_tensors="pt")
+     input_ids = tokenized_text.input_ids
+     attention_mask = tokenized_text.attention_mask
+     output = greedy_search(model, input_ids, attention_mask, no_repeat_ngrams, max_length)
+     st.session_state['text'] = tokenizer.decode(output[0], skip_special_tokens=True)
+     details = "Text generated using greedy decoding"
+
+ if button_sampling:
+     model, tokenizer = setModel(model_checkpoint)
+     tokenized_text = tokenizer(st.session_state['text'], add_special_tokens=False, return_tensors="pt")
+     input_ids = tokenized_text.input_ids
+     attention_mask = tokenized_text.attention_mask
+     output = sampling(model, input_ids, attention_mask, no_repeat_ngrams, max_length, temperature, top_k, top_p)
+     st.session_state['text'] = tokenizer.decode(output[0], skip_special_tokens=True)
+     details = f"Text generated using sampling, top-p={top_p:.2f}, top-k={top_k:.2f}, temperature={temperature:.2f}"
+
+ if button_typical:
+     model, tokenizer = setModel(model_checkpoint)
+     tokenized_text = tokenizer(st.session_state['text'], add_special_tokens=False, return_tensors="pt")
+     input_ids = tokenized_text.input_ids
+     attention_mask = tokenized_text.attention_mask
+     output = typical_sampling(model, input_ids, attention_mask, no_repeat_ngrams, max_length, temperature, typical_p)
+     st.session_state['text'] = tokenizer.decode(output[0], skip_special_tokens=True)
+     details = f"Text generated using typical sampling, typical-p={typical_p:.2f}, temperature={temperature:.2f}"
+
+ text_element = col2.text_area('Text:', height=400, key="text")
+ if details != "":
+     col2.write(details)
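
The four new helpers are thin wrappers around `model.generate`, differing only in which decoding arguments they forward. A minimal standalone sketch of the sampling path, run outside Streamlit (the checkpoint is one from `model_list`; the concrete parameter values are illustrative, not from the commit):

# Standalone sketch mirroring the app's sampling() path, no Streamlit needed.
from transformers import AutoTokenizer, AutoModelForCausalLM

checkpoint = "dumitrescustefan/gpt-neo-romanian-780m"
model = AutoModelForCausalLM.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# Tokenize exactly as the app does: no special tokens, PyTorch tensors.
inputs = tokenizer("Acesta este un exemplu,", add_special_tokens=False, return_tensors="pt")

# Same generate() arguments the sampling() helper forwards (values illustrative).
output = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    no_repeat_ngram_size=2,
    max_length=50,
    do_sample=True,
    temperature=0.8,
    top_k=50,
    top_p=0.9,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))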
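The run-time block writes the generated text into `st.session_state['text']` before `col2.text_area(..., key="text")` is instantiated, which is what lets the same widget serve as both prompt input and output display. A reduced sketch of that round-trip, with a hypothetical Append button standing in for the generate calls:

import streamlit as st

if "text" not in st.session_state:
    st.session_state["text"] = "Acesta este un exemplu,"

if st.button("Append"):
    # Mutate the widget's backing state *before* the widget is created
    # on this script run; the text area then shows the updated value.
    st.session_state["text"] += " ...continuare generată."

st.text_area("Text:", height=200, key="text")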
requirements.txt CHANGED
@@ -1,4 +1,5 @@
  streamlit>=1.12.0
+ st_btn_select
  git+https://github.com/huggingface/transformers
  torch
  protobuf~=3.19.0
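
`st_btn_select` is added as a dependency but is not imported by `app.py` yet. Assuming it follows the usage documented on its PyPI page, a sketch of how it could collapse the four decoding buttons into a single selector:

import streamlit as st
from st_btn_select import st_btn_select  # assumed entry point, per the package docs

# Renders a compact button group and returns the selected label.
decoding = st_btn_select(("Greedy", "Beam-search", "Sampling", "Typical sampling"))
st.write("Selected decoding strategy:", decoding)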