coffeeee committed on
Commit
10fb5a7
1 Parent(s): c0259a8

added app files

app.py ADDED
@@ -0,0 +1,86 @@
+ import random
+ import string
+
+ import gradio as gr
+ import nltk
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer, GenerationConfig, set_seed
+
+ response_length = 200
+
+ # Requires the NLTK punkt data; run nltk.download('punkt') once if it is missing.
+ sentence_detector = nltk.data.load('tokenizers/punkt/english.pickle')
+
+ tokenizer = GPT2Tokenizer.from_pretrained("gpt2-medium")
+ tokenizer.truncation_side = 'right'
+
+ model = GPT2LMHeadModel.from_pretrained('checkpoint-10000')
+ generation_config = GenerationConfig.from_pretrained('gpt2-medium')
+ generation_config.max_new_tokens = response_length
+ generation_config.pad_token_id = generation_config.eos_token_id
+
+ outputs = []
+
+
+ def generate_response(new_prompt):
+     global outputs
+     # Keep only as many stored responses as fit in the 1024-token context window.
+     story_so_far = "\n".join(outputs[:int(1024 / response_length + 1)])
+
+     set_seed(random.randint(0, 4000000000))
+     inputs = tokenizer.encode(story_so_far + '\n' + new_prompt if story_so_far else new_prompt,
+                               return_tensors='pt', truncation=True,
+                               max_length=1024 - response_length)
+
+     output = model.generate(inputs, do_sample=True, generation_config=generation_config)
+     # Slice the prompt text (plus the joining newline) off the decoded output.
+     response = clean_paragraph(tokenizer.batch_decode(output)[0][((len(story_so_far) + 1) if story_so_far else 0):])
+     outputs.append(response)
+
+     return ((story_so_far + '\n' if story_so_far else '') + response).replace('\n', '\n\n')
+
+
+ def undo():
+     global outputs
+     outputs = outputs[:-1]
+     return "\n".join(outputs).replace('\n', '\n\n')
+
+
+ def clean_paragraph(entry):
+     paragraphs = entry.split('\n')
+
+     for i in range(len(paragraphs)):
+         split_sentences = nltk.tokenize.sent_tokenize(paragraphs[i], language='english')
+         # Drop a trailing unfinished sentence (one not ending in punctuation) from the last paragraph.
+         if i == len(paragraphs) - 1 and split_sentences and split_sentences[-1][-1] not in string.punctuation:
+             paragraphs[i] = " ".join(split_sentences[:-1])
+
+     return capitalize_first_char("\n".join(paragraphs))
+
+
+ def reset():
+     global outputs
+     outputs = []
+     return None
+
+
+ def capitalize_first_char(entry):
+     # Upper-case the first alphabetic character, leaving any leading symbols intact.
+     for i in range(len(entry)):
+         if entry[i].isalpha():
+             return entry[:i] + entry[i].upper() + entry[i + 1:]
+     return entry
+
+
+ with gr.Blocks() as demo:
+     story = gr.Textbox(interactive=False, lines=20)
+     story.style(show_copy_button=True)
+
+     prompt = gr.Textbox(placeholder="Continue the story here!", lines=3, max_lines=3)
+
+     with gr.Row():
+         gen_button = gr.Button('Generate')
+         undo_button = gr.Button("Undo")
+         res_button = gr.Button("Reset")
+
+     prompt.submit(generate_response, prompt, story, scroll_to_output=True)
+     gen_button.click(generate_response, prompt, story, scroll_to_output=True)
+     undo_button.click(undo, [], story, scroll_to_output=True)
+     res_button.click(reset, [], story, scroll_to_output=True)
+
+ demo.launch(inbrowser=True)
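
Before the UI is involved, the load-and-generate path above can be smoke-tested on its own. A minimal sketch, assuming the checkpoint-10000 directory from this commit has been pulled locally (e.g. via git lfs) and transformers is installed:

import random
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GenerationConfig, set_seed

tokenizer = GPT2Tokenizer.from_pretrained("gpt2-medium")
model = GPT2LMHeadModel.from_pretrained("checkpoint-10000")
generation_config = GenerationConfig.from_pretrained("gpt2-medium")
generation_config.max_new_tokens = 50  # shorter than the app's 200 for a quick check
generation_config.pad_token_id = generation_config.eos_token_id

set_seed(random.randint(0, 4000000000))
inputs = tokenizer.encode("Once upon a time", return_tensors="pt")
output = model.generate(inputs, do_sample=True, generation_config=generation_config)
print(tokenizer.batch_decode(output)[0])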
checkpoint-10000/config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "gpt2-medium",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 1024,
+   "n_head": 16,
+   "n_inner": null,
+   "n_layer": 24,
+   "n_positions": 1024,
+   "n_special": 0,
+   "predict_special_tokens": true,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.1",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
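
The n_ctx/n_positions value of 1024 is the context budget that app.py splits between prompt and response (max_length=1024 - response_length). A quick sanity check, assuming the checkpoint directory is available locally:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("checkpoint-10000")
response_length = 200  # same constant as in app.py
assert config.n_positions == 1024
print("prompt budget:", config.n_positions - response_length)  # 824 tokens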
checkpoint-10000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.28.1"
+ }
checkpoint-10000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-10000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:714f3c35c4eaa81e750f7e2e5862edf3b104c6e8b8039cd6a5d3106805a5aced
+ size 2838756549
checkpoint-10000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4b4d10f7689cea0856cf9fa5ce274eb6006f9852b6cfaf137e93376448a3db9
+ size 1444569373
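
The .pt/.bin entries in this commit are Git LFS pointer files, not the binaries themselves; the oid records the SHA-256 of the real object fetched at checkout. Once downloaded, a file can be verified against its pointer with a short sketch like this (the path assumes the repo layout shown here):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so the 1.4 GB model never sits in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "e4b4d10f7689cea0856cf9fa5ce274eb6006f9852b6cfaf137e93376448a3db9"
assert sha256_of("checkpoint-10000/pytorch_model.bin") == expected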
checkpoint-10000/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3edda5e835455fbfa5ef36857a5b651ac923bd4638899b1783d40cf864a4aea8
+ size 14583
checkpoint-10000/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cb5778788b3edc5a26f73ca723a7507e96882fb99aa3b4d3c8d3fdc4e953e3a
+ size 14583
checkpoint-10000/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55cba491a0505b8a12c6c03ade1fc3f2fc45ad345b948c4de63adeabcda228b2
+ size 557
checkpoint-10000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cb16482cc67d8670df29f2476270f3d41c3a66021703a9c5d6dc8c2e59d5ce0
+ size 627
checkpoint-10000/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-10000/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 1024,
+   "pad_token": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
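
model_max_length is 1024 here, and app.py sets truncation_side = 'right', so an over-long story is cut from the end and the opening is preserved. A sketch of that behavior with the stock gpt2-medium tokenizer:

from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2-medium")
tokenizer.truncation_side = "right"
ids = tokenizer.encode("word " * 2000, truncation=True, max_length=824)
print(len(ids))  # 824: everything past the prompt budget is dropped from the right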
checkpoint-10000/trainer_state.json ADDED
@@ -0,0 +1,72 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 4.195510803440319,
+   "global_step": 10000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.05,
+       "learning_rate": 6.234097519017773e-05,
+       "loss": 2.9092,
+       "step": 2500
+     },
+     {
+       "epoch": 1.05,
+       "eval_loss": 2.7911548614501953,
+       "eval_runtime": 359.1547,
+       "eval_samples_per_second": 11.8,
+       "eval_steps_per_second": 2.951,
+       "step": 2500
+     },
+     {
+       "epoch": 2.1,
+       "learning_rate": 6.137463890549805e-05,
+       "loss": 2.7715,
+       "step": 5000
+     },
+     {
+       "epoch": 2.1,
+       "eval_loss": 2.7525341510772705,
+       "eval_runtime": 358.9958,
+       "eval_samples_per_second": 11.805,
+       "eval_steps_per_second": 2.953,
+       "step": 5000
+     },
+     {
+       "epoch": 3.15,
+       "learning_rate": 5.9557478454587004e-05,
+       "loss": 2.688,
+       "step": 7500
+     },
+     {
+       "epoch": 3.15,
+       "eval_loss": 2.737689256668091,
+       "eval_runtime": 359.124,
+       "eval_samples_per_second": 11.801,
+       "eval_steps_per_second": 2.952,
+       "step": 7500
+     },
+     {
+       "epoch": 4.2,
+       "learning_rate": 5.6939618780421336e-05,
+       "loss": 2.6213,
+       "step": 10000
+     },
+     {
+       "epoch": 4.2,
+       "eval_loss": 2.728829860687256,
+       "eval_runtime": 359.1054,
+       "eval_samples_per_second": 11.802,
+       "eval_steps_per_second": 2.952,
+       "step": 10000
+     }
+   ],
+   "max_steps": 47660,
+   "num_train_epochs": 20,
+   "total_flos": 2.433430888256635e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
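
Eval loss is still falling at step 10000 (2.791 → 2.753 → 2.738 → 2.729) at roughly 21% of the planned 47660 steps, so this checkpoint is an early snapshot rather than a converged model. The trend can be pulled out of this file directly:

import json

with open("checkpoint-10000/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>6}: eval_loss {entry['eval_loss']:.4f}")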
checkpoint-10000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:725092dcfad06c0f8f972968c8270418de57a1d4a00c388e757fdc1753aa5f9f
+ size 3515
checkpoint-10000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
main.py DELETED
@@ -1,4 +0,0 @@
- import bot
-
- if __name__ == '__main__':
-     bot.run_discord_bot()