Stefan Dumitrescu committed
Commit 0957f7e • 1 Parent(s): 76d0859
Files changed (1):
  1. app.py +23 -29
app.py CHANGED
@@ -13,22 +13,35 @@ from transformers import AutoTokenizer, AutoModelWithLMHead
 st.set_page_config(
     page_title="Romanian Text Generator",
     page_icon="🇷🇴",
+    layout="wide"
 )
 
-tokenizer = AutoTokenizer.from_pretrained("dumitrescustefan/gpt-neo-romanian-780m")
+model_list = ["dumitrescustefan/gpt-neo-romanian-780m"]
+st.sidebar.header("Select Model")
+model_checkpoint = st.sidebar.radio("", model_list)
+text_element = st.text_input('Text:', 'Acesta este un exemplu,')
 
 
-@st.cache
-def load_model(model_name):
-    model = AutoModelWithLMHead.from_pretrained("dumitrescustefan/gpt-neo-romanian-780m")
-    return model
 
-model = load_model("dumitrescustefan/gpt-neo-romanian-780m")
 
 
-def infer(input_ids, max_length, temperature, top_k, top_p):
+st.sidebar.header("Select type of PERSON detection")
+max_length = st.sidebar.slider("Max Length", value=20, min_value=10, max_value=200)
+temperature = st.sidebar.slider("Temperature", value=1.0, min_value=0.0, max_value=1.0, step=0.05)
+top_k = st.sidebar.slider("Top-k", min_value=0, max_value=15, step=1, value=0)
+top_p = st.sidebar.slider("Top-p", min_value=0.0, max_value=1.0, step=0.05, value=0.9)
+
+
+@st.cache(allow_output_mutation=True)
+def setModel(model_name):
+    model = AutoModelWithLMHead.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    return model, tokenizer
+
+def infer(model, tokenizer, text, input_ids, max_length, temperature, top_k, top_p):
+    encoded_prompt = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt")
     output_sequences = model.generate(
-        input_ids=input_ids,
+        input_ids=encoded_prompt.input_ids,
         max_length=max_length,
         temperature=temperature,
         top_k=top_k,
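For context between the hunks: the commit replaces the module-level loader with a cached one that returns both model and tokenizer. None of the hunks shown actually call setModel, though, so model and tokenizer are undefined where infer() is later invoked; the sidebar header "Select type of PERSON detection" also appears to be a label left over from a different app. A minimal sketch of how the loader would presumably be wired to the sidebar selection (the call site below is an assumption, not part of the commit):

    import streamlit as st
    from transformers import AutoTokenizer, AutoModelWithLMHead
    # Note: AutoModelWithLMHead is deprecated in newer transformers releases;
    # AutoModelForCausalLM is the usual replacement for causal LMs like GPT-Neo.

    @st.cache(allow_output_mutation=True)  # cache across Streamlit reruns; skip hashing the returned objects
    def setModel(model_name):
        model = AutoModelWithLMHead.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        return model, tokenizer

    # Assumed call site, feeding the radio-button choice into the loader:
    model, tokenizer = setModel(model_checkpoint)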
@@ -40,26 +53,7 @@ def infer(input_ids, max_length, temperature, top_k, top_p):
     return output_sequences
 
 
-default_value = "See how a modern neural network auto-completes your text 🤗 This site, built by the Hugging Face team, lets you write a whole document directly from your browser, and you can trigger the Transformer anywhere using the Tab key. Its like having a smart machine that completes your thoughts 😀 Get started by typing a custom snippet, check out the repository, or try one of the examples. Have fun!"
-
-# prompts
-st.title("Write")
-st.write(
-    "The almighty king of text generation, GPT-2 comes in four available sizes, only three of which have been publicly made available. Feared for its fake news generation capabilities, it currently stands as the most syntactically coherent model. A direct successor to the original GPT, it reinforces the already established pre-training/fine-tuning killer duo. From the paper: Language Models are Unsupervised Multitask Learners by Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever.")
-
-sent = st.text_area("Text", default_value, height=275)
-max_length = st.sidebar.slider("Max Length", min_value=10, max_value=30)
-temperature = st.sidebar.slider("Temperature", value=1.0, min_value=0.0, max_value=1.0, step=0.05)
-top_k = st.sidebar.slider("Top-k", min_value=0, max_value=5, value=0)
-top_p = st.sidebar.slider("Top-p", min_value=0.0, max_value=1.0, step=0.05, value=0.9)
-
-encoded_prompt = tokenizer.encode(sent, add_special_tokens=False, return_tensors="pt")
-if encoded_prompt.size()[-1] == 0:
-    input_ids = None
-else:
-    input_ids = encoded_prompt
-
-output_sequences = infer(input_ids, max_length, temperature, top_k, top_p)
+output_sequences = infer(model, tokenizer, text_element, input_ids, max_length, temperature, top_k, top_p)
 
 for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
     print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")
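Two details in the new infer() would fail at runtime: tokenizer.encode(..., return_tensors="pt") returns a bare tensor, so encoded_prompt.input_ids raises AttributeError (only the tokenizer.__call__ path returns an object with .input_ids), and the input_ids parameter is never used yet is still required at the call site, where the variable that used to supply it was deleted in this same commit. A hedged sketch of a working variant, assuming sampling is enabled in the generate() arguments the diff does not show:

    import torch

    def infer(model, tokenizer, text, max_length, temperature, top_k, top_p):
        # encode(..., return_tensors="pt") already yields the token-id tensor,
        # so it goes straight into generate() with no .input_ids attribute access.
        encoded_prompt = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt")
        with torch.no_grad():  # inference only, no gradients needed
            output_sequences = model.generate(
                input_ids=encoded_prompt,
                max_length=max_length,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                do_sample=True,  # assumption: sampling must be on for temperature/top-k/top-p to take effect
            )
        return output_sequences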
@@ -79,5 +73,5 @@ for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
     generated_sequences.append(total_sequence)
     print(total_sequence)
 
-st.write(generated_sequences[-1])
+st.write(generated_sequences[-1], text_element)
 
 
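The display loop is almost entirely context in this diff; only the final st.write changes, so the rendered output now includes the prompt alongside the last generation. A sketch of the tail as the context lines suggest it reads, with the decoding step (which the diff never shows) marked as an assumption:

    generated_sequences = []
    for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
        print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")
        # Assumed decoding step; the lines between the shown context are not in the diff.
        total_sequence = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
        generated_sequences.append(total_sequence)
        print(total_sequence)

    st.write(generated_sequences[-1], text_element)  # changed line: the prompt is now shown too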