Upload model

Files changed:
- README.md +18 -9
- config.json +4 -4
- tf_model.h5 +2 -2
README.md CHANGED

@@ -1,23 +1,23 @@
 ---
 license: mit
-base_model: gpt2
+base_model: gpt2-large
 tags:
 - generated_from_keras_callback
 model-index:
-- name:
+- name: enhanced_turkishReviews-generativeAI
 results: []
 ---
 
 <!-- This model card has been generated automatically according to the information Keras had access to. You should
 probably proofread and complete it, then remove this comment. -->
 
-# 
+# enhanced_turkishReviews-generativeAI
 
-This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
+This model is a fine-tuned version of [gpt2-large](https://huggingface.co/gpt2-large) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Train Loss: 9.
-- Validation Loss: 9.
-- Epoch:
+- Train Loss: 9.7770
+- Validation Loss: 9.7482
+- Epoch: 9
 
 ## Model description
 
@@ -36,14 +36,23 @@
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- optimizer: {'inner_optimizer': {'module': 'transformers.optimization_tf', 'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'module': 'transformers.optimization_tf', 'class_name': 'WarmUp', 'config': {'initial_learning_rate': 5e-07, 'decay_schedule_fn': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-07, 'decay_steps': -293, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}, 'registered_name': 'WarmUp'}, 'decay': 0.0, 'beta_1': 0.8999999761581421, 'beta_2': 0.9990000128746033, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.
+- optimizer: {'inner_optimizer': {'module': 'transformers.optimization_tf', 'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'module': 'transformers.optimization_tf', 'class_name': 'WarmUp', 'config': {'initial_learning_rate': 5e-07, 'decay_schedule_fn': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-07, 'decay_steps': -293, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}, 'registered_name': 'WarmUp'}, 'decay': 0.0, 'beta_1': 0.8999999761581421, 'beta_2': 0.9990000128746033, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.09}, 'registered_name': 'AdamWeightDecay'}, 'dynamic': True, 'initial_scale': 32768.0, 'dynamic_growth_steps': 2000}
 - training_precision: mixed_float16
 
 ### Training results
 
 | Train Loss | Validation Loss | Epoch |
 |:----------:|:---------------:|:-----:|
-|
+| 10.1112 | 9.8654 | 0 |
+| 9.8034 | 9.7482 | 1 |
+| 9.7773 | 9.7482 | 2 |
+| 9.7774 | 9.7482 | 3 |
+| 9.7772 | 9.7482 | 4 |
+| 9.7770 | 9.7482 | 5 |
+| 9.7770 | 9.7482 | 6 |
+| 9.7774 | 9.7482 | 7 |
+| 9.7776 | 9.7482 | 8 |
+| 9.7770 | 9.7482 | 9 |
 
 
 ### Framework versions
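The optimizer entry in the new model card is the serialized Keras config of a `transformers` `AdamWeightDecay` optimizer with a `WarmUp` learning-rate schedule, wrapped for `mixed_float16` loss scaling (`dynamic: True`, `initial_scale: 32768.0`); the beta values are simply 0.9 and 0.999 stored as float32, and the negative `decay_steps` of -293 suggests the warmup length (1000 steps) exceeded the total number of training steps. Below is a minimal sketch of how an optimizer with these hyperparameters could be rebuilt; the surrounding training loop and step counts are not part of this commit and are assumed for illustration only.

```python
# Sketch: rebuilding the optimizer described in the model card's hyperparameters.
# Schedule values (5e-07, warmup_steps=1000, decay_steps=-293, weight_decay_rate=0.09)
# come from the logged config; everything else is assumed for illustration.
import tensorflow as tf
from transformers import AdamWeightDecay, WarmUp

decay_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=5e-07,
    decay_steps=-293,          # value as logged by the Keras callback
    end_learning_rate=0.0,
    power=1.0,
)
lr_schedule = WarmUp(
    initial_learning_rate=5e-07,
    decay_schedule_fn=decay_schedule,
    warmup_steps=1000,
)
optimizer = AdamWeightDecay(
    learning_rate=lr_schedule,
    weight_decay_rate=0.09,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-08,
)

# training_precision: mixed_float16 -- with this global policy set, Keras wraps
# the optimizer in a dynamic LossScaleOptimizer, matching the logged
# 'dynamic': True / 'initial_scale': 32768.0 fields.
tf.keras.mixed_precision.set_global_policy("mixed_float16")
```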
config.json CHANGED

@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "gpt2",
+  "_name_or_path": "gpt2-large",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
@@ -13,12 +13,12 @@
   "model_type": "gpt2",
   "n_ctx": 40,
   "n_embd": 768,
-  "n_head":
+  "n_head": 16,
   "n_inner": null,
-  "n_layer":
+  "n_layer": 16,
   "n_positions": 1024,
   "reorder_and_upcast_attn": false,
-  "resid_pdrop": 0.
+  "resid_pdrop": 0.2,
   "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
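Despite `_name_or_path` now pointing at `gpt2-large`, the dimensions in the updated config (`n_embd` 768, `n_head` 16, `n_layer` 16, `n_ctx` 40) describe a much smaller custom architecture than stock gpt2-large (1280 hidden size, 20 heads, 36 layers). A hedged sketch of instantiating a model with exactly these dimensions is shown below; it builds a randomly initialised model from the config values rather than loading the uploaded tf_model.h5.

```python
# Sketch: building a TF GPT-2 LM-head model from the updated config values.
# Field values are taken from the config.json diff above; weights are
# randomly initialised here, not loaded from the uploaded checkpoint.
from transformers import GPT2Config, TFGPT2LMHeadModel

config = GPT2Config(
    n_embd=768,
    n_head=16,
    n_layer=16,
    n_positions=1024,
    n_ctx=40,                  # unusually short context window, as set in the diff
    resid_pdrop=0.2,
    activation_function="gelu_new",
)
model = TFGPT2LMHeadModel(config)
```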
tf_model.h5 CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:55561abda80ee7e21db5f23c2f6bfe0a8a0399d5803a3bc91cdb95bff17532f0
+size 533811512
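The tf_model.h5 entry is a Git LFS pointer: the repository stores only the object id (a SHA-256 hash of the file) and its size in bytes (533811512, roughly 534 MB), while the weights themselves live in LFS storage. A small sketch of verifying a downloaded copy against this pointer follows; the local file path is an assumption.

```python
# Sketch: verifying a downloaded tf_model.h5 against the Git LFS pointer above.
# The file path is hypothetical; the oid and size come from the new pointer.
import hashlib
import os

EXPECTED_OID = "55561abda80ee7e21db5f23c2f6bfe0a8a0399d5803a3bc91cdb95bff17532f0"
EXPECTED_SIZE = 533811512  # bytes, as recorded in the pointer

path = "tf_model.h5"  # assumed local download location

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

assert sha256.hexdigest() == EXPECTED_OID, "checksum mismatch"
print("tf_model.h5 matches the LFS pointer")
```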