zuozuo commited on
Commit
9d1d724
1 Parent(s): bed8885
Files changed (7) hide show
  1. .gitignore +1 -0
  2. app.py +12 -4
  3. config.json +28 -0
  4. requirements.txt +0 -0
  5. special_tokens_map.json +1 -0
  6. tokenizer_config.json +1 -0
  7. vocab.txt +0 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ tf_model.h5
app.py CHANGED
@@ -1,15 +1,23 @@
1
  '''
2
  Author: zuojianghua
3
  Date: 2022-07-06 10:40:08
4
- LastEditTime: 2022-07-06 10:40:13
5
  LastEditors: zuojianghua
6
  Description:
7
  FilePath: /zuo/app.py
8
  '''
9
  import gradio as gr
 
 
 
 
10
 
11
- def greet(name):
12
- return "Hello " + name + "!!"
 
13
 
14
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
 
 
 
15
  iface.launch()
 
1
  '''
2
  Author: zuojianghua
3
  Date: 2022-07-06 10:40:08
4
+ LastEditTime: 2022-07-07 17:03:34
5
  LastEditors: zuojianghua
6
  Description:
7
  FilePath: /zuo/app.py
8
  '''
9
  import gradio as gr
10
# Load the tokenizer and language model from the repo root (".").
# config.json in this commit declares tokenizer_class=BertTokenizer and
# architectures=[GPT2LMHeadModel], so the pairing below matches the checkpoint.
from transformers import BertTokenizer, GPT2LMHeadModel,TextGenerationPipeline
tokenizer = BertTokenizer.from_pretrained(".")
model = GPT2LMHeadModel.from_pretrained(".")
# Convenience pipeline used by poem() below for free-text generation.
text_generator = TextGenerationPipeline(model, tokenizer)
14
 
15
def poem(cls):
    """Generate a poem continuation for the given opening line.

    Args:
        cls: Opening text of the poem, e.g. "春眠不觉晓,". It is prefixed
            with the '[CLS]' token expected by the BERT-style tokenizer.

    Returns:
        str: The generated text. The pipeline returns a list of
        ``{'generated_text': ...}`` dicts; returning that raw structure
        to the gradio "text" output would display a Python repr, so we
        unwrap the first candidate's string here.
    """
    results = text_generator('[CLS]' + cls, max_length=100, do_sample=True)
    return results[0]['generated_text']
18
 
19
# Wire poem() into a minimal gradio UI: one single-line textbox in,
# plain text out, then start the web server (blocking call).
iface = gr.Interface(
    fn=poem,
    inputs=gr.Textbox(lines=1, placeholder="春眠不觉晓,"),
    outputs="text")
iface.launch()
config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "GPT2LMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.1,
7
+ "embd_pdrop": 0.1,
8
+ "gradient_checkpointing": false,
9
+ "initializer_range": 0.02,
10
+ "layer_norm_epsilon": 1e-05,
11
+ "model_type": "gpt2",
12
+ "n_ctx": 1024,
13
+ "n_embd": 768,
14
+ "n_head": 12,
15
+ "n_inner": null,
16
+ "n_layer": 12,
17
+ "n_positions": 1024,
18
+ "output_past": true,
19
+ "resid_pdrop": 0.1,
20
+ "task_specific_params": {
21
+ "text-generation": {
22
+ "do_sample": true,
23
+ "max_length": 120
24
+ }
25
+ },
26
+ "tokenizer_class": "BertTokenizer",
27
+ "vocab_size": 22557
28
+ }
requirements.txt ADDED
File without changes
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "do_basic_tokenize": true, "never_split": null}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff