POMAHSLS committed (verified)
Commit ce03d07 · 1 Parent(s): 86d0f86

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+journal.txt filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
-title: ITLT Journal
-emoji: 🐨
-colorFrom: purple
-colorTo: red
+title: ITLT_Journal
+app_file: zapusk.py
 sdk: gradio
 sdk_version: 4.36.1
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
finetunning.py ADDED
@@ -0,0 +1,72 @@
+import torch
+from transformers import TextDataset, DataCollatorForLanguageModeling
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import Trainer, TrainingArguments
+
+print(torch.cuda.is_available())
+
+def load_dataset(file_path, tokenizer, block_size=128):
+    # Pack the plain-text corpus into fixed-size token blocks for causal LM training.
+    dataset = TextDataset(
+        tokenizer=tokenizer,
+        file_path=file_path,
+        block_size=block_size,
+    )
+    return dataset
+
+def load_data_collator(tokenizer, mlm=False):
+    # mlm=False: causal language modeling, no token masking.
+    data_collator = DataCollatorForLanguageModeling(
+        tokenizer=tokenizer,
+        mlm=mlm,
+    )
+    return data_collator
+
+def train(train_file_path, model_name, output_dir, overwrite_output_dir,
+          per_device_train_batch_size, num_train_epochs, save_steps, resume_from_checkpoint):
+    # model_name is accepted but unused: the base model is always the Hub id below.
+    tokenizer = AutoTokenizer.from_pretrained("malteos/gpt2-uk")
+    train_dataset = load_dataset(train_file_path, tokenizer)
+    data_collator = load_data_collator(tokenizer)
+    tokenizer.save_pretrained(output_dir)
+    model = AutoModelForCausalLM.from_pretrained("malteos/gpt2-uk")
+    model.save_pretrained(output_dir)
+
+    training_args = TrainingArguments(
+        output_dir=output_dir,
+        overwrite_output_dir=overwrite_output_dir,
+        per_device_train_batch_size=per_device_train_batch_size,
+        num_train_epochs=num_train_epochs,
+        save_steps=save_steps,  # checkpoint every save_steps optimizer steps
+    )
+
+    trainer = Trainer(
+        model=model,
+        args=training_args,
+        data_collator=data_collator,
+        train_dataset=train_dataset,
+    )
+
+    trainer.train(resume_from_checkpoint=resume_from_checkpoint)
+    trainer.save_model()
+
+train_directory = 'H:/Finetunning/q_and_a'
+train_file_path = 'H:/Finetunning/journal.txt'
+model_name = train_directory
+output_dir = 'H:/Finetunning/custom_full_text'
+overwrite_output_dir = False
+per_device_train_batch_size = 8
+num_train_epochs = 51
+save_steps = 50000
+
+print("Starting training...")
+train(
+    train_file_path=train_file_path,
+    model_name=model_name,
+    output_dir=output_dir,
+    overwrite_output_dir=overwrite_output_dir,
+    per_device_train_batch_size=per_device_train_batch_size,
+    num_train_epochs=num_train_epochs,
+    save_steps=save_steps,
+    resume_from_checkpoint=True  # False for the first run, True to resume from a checkpoint
+)
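Note that TextDataset is deprecated in recent transformers releases. A minimal sketch of the same fixed-block packing with the datasets library (an assumption on my part, not part of this commit; it reuses journal.txt and the same tokenizer):

from datasets import load_dataset as hf_load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("malteos/gpt2-uk")
raw = hf_load_dataset("text", data_files={"train": "journal.txt"})

def group_texts(batch, block_size=128):
    # Tokenize, concatenate everything, then split into fixed-size blocks,
    # mirroring what TextDataset produced.
    ids = sum(tokenizer(batch["text"])["input_ids"], [])
    total = (len(ids) // block_size) * block_size
    return {"input_ids": [ids[i:i + block_size] for i in range(0, total, block_size)]}

train_dataset = raw["train"].map(group_texts, batched=True,
                                 remove_columns=raw["train"].column_names)

The resulting dataset plugs into the same Trainer and data collator as above.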
journal.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:197f8a6c94ec784f2762c9707815bbecf75e6c2fff2a18f9337688c8d34c9166
+size 112245281
lastversion/config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "_name_or_path": "malteos/gpt2-uk",
+  "activation_function": "gelu",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "gradient_checkpointing": false,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": 3072,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "torch_dtype": "float32",
+  "transformers_version": "4.41.1",
+  "use_cache": true,
+  "vocab_size": 50304
+}
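For reference, the config above describes a standard GPT-2-small architecture (12 layers, 12 heads, 768-dim embeddings, 50304-token vocabulary). A quick way to inspect it, assuming the lastversion/ folder from this commit is checked out locally:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("lastversion")  # repo-relative path, assumed local
print(config.n_layer, config.n_head, config.n_embd)  # 12 12 768
print(config.vocab_size)                             # 50304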
lastversion/generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.41.1"
+}
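The generation defaults can be loaded the same way (a sketch under the same local-checkout assumption):

from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("lastversion")
print(gen.bos_token_id, gen.eos_token_id)  # 50256 50256, inherited from the model config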
lastversion/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e087e5da6ad22b2a7f4252d9041c1f77f112c6091571d8005ee7267f5a55902
+size 497918592
lastversion/special_tokens_map.json ADDED
Binary file (582 Bytes).
 
lastversion/tokenizer.json ADDED
The diff for this file is too large to render.
 
lastversion/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "</s>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": "<unk>"
+}
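A quick sanity check of these special tokens (again a sketch assuming a local checkout). Note that added_tokens_decoder assigns </s> the id 2, while config.json above declares eos_token_id 50256; that mismatch is worth verifying before relying on end-of-sequence behavior:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("lastversion")
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)  # <s> </s> <pad> <unk>
print(tok.eos_token_id)  # 2 here, versus 50256 in the model config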
lastversion/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01b787660d6af5c39639f0bf313aa28fe2f4718ca9c0c99cce7eb1aa278fa849
+size 5112
zapusk.py ADDED
@@ -0,0 +1,12 @@
+from transformers import pipeline
+import gradio as gr
+
+# Load the fine-tuned checkpoint exported by finetunning.py.
+# Note: this absolute Windows path exists only on the author's machine;
+# the Space itself would need the repo-relative "lastversion" folder instead.
+model = pipeline("text-generation",
+                 model="H:/Finetunning/lastversion")
+
+def predict(prompt):
+    completion = model(prompt, max_length=50)[0]["generated_text"]
+    return completion
+
+gr.Interface(fn=predict, inputs="text", outputs="text").launch(share=True)
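One possible refinement (my suggestion, not in the commit): max_length counts the prompt tokens too, so long prompts leave little room for the completion. Using max_new_tokens with sampling keeps the output length predictable:

def predict(prompt):
    # max_new_tokens counts only generated tokens; sampling reduces repetition
    out = model(prompt, max_new_tokens=50, do_sample=True, top_p=0.95, temperature=0.8)
    return out[0]["generated_text"]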