egosumkira committed on
Commit
c61d2bb
1 Parent(s): 5d790bb
Files changed (2)
  1. app.py +45 -0
  2. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,45 @@
+ from transformers import GPT2Tokenizer, TFGPT2LMHeadModel, pipeline
+ import gradio as gr
+
+
+ # Load the fine-tuned TensorFlow GPT-2 model and the base GPT-2 tokenizer.
+ model = TFGPT2LMHeadModel.from_pretrained("egosumkira/gpt2-fantasy")
+ tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+
+ story = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     device=0
+ )
+
+
+ def generate(tags_text, start_text="", temp=1.0, n_beams=3, max_l=128):
+     # Build the prompt in the format the model was fine-tuned on:
+     # "~^tag1^tag2~@optional beginning of the story".
+     tags = tags_text.split(", ")
+     prefix = f"~^{'^'.join(tags)}~@{start_text}"
+     # Pass the UI-controlled settings through instead of hard-coding them.
+     g_text = story(
+         prefix,
+         temperature=temp,
+         max_length=int(max_l),
+         num_beams=int(n_beams),
+         repetition_penalty=7.0,
+     )[0]["generated_text"]
+     # Return only the story itself, i.e. everything after the "@" separator.
+     return g_text[g_text.find("@") + 1:]
+
+
+ title = "GPT-2 fantasy story generator"
+ description = "This model generates a short fantasy story based on a set of keywords and an (optional) beginning of the text."
+
+ iface = gr.Interface(
+     generate,
+     # Inputs are listed in the same order as the parameters of generate().
+     inputs=[
+         gr.Textbox(label="Keywords (comma separated)"),
+         gr.Textbox(label="Beginning of the text (optional)"),
+         gr.Slider(0, 2, value=1.0, step=0.05, label="Temperature"),
+         gr.Slider(1, 10, value=3, step=1, label="Number of beams in Beam Search"),
+         gr.Number(label="Max length", value=128),
+     ],
+     outputs=gr.Textbox(label="Output"),
+     title=title,
+     description=description
+ )
+
+
+ if __name__ == "__main__":
+     iface.queue()
+     iface.launch()
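
For local testing outside the Gradio UI, here is a minimal usage sketch of the generate() function above. The keyword list and opening sentence are made-up examples, and importing app loads the model exactly as the module-level code does (including device=0, which assumes a GPU is available).

from app import generate

# Hypothetical inputs: "dragon, castle" becomes the prefix
# "~^dragon^castle~@The knight rode out at dawn", and generate()
# returns only the text after the "@" separator.
print(generate("dragon, castle", "The knight rode out at dawn",
               temp=0.9, n_beams=5, max_l=200))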
requirements.txt ADDED
@@ -0,0 +1 @@
+ transformers==4.29.2
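
Note: besides transformers, app.py imports gradio and loads the model through the TensorFlow class TFGPT2LMHeadModel, so tensorflow (and gradio, if the runtime does not already provide it, as a Gradio Space typically does) must be available in the environment. A fuller requirements.txt might look like the sketch below; the extra entries are an assumption and are left unpinned because the commit does not specify versions.

transformers==4.29.2
tensorflow
gradio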