session_name: base
data_directory: "data"
data_type: "ashaar_proc"
log_directory: "deep-learning-models/log_dir_ashaar"
load_training_data: true
load_test_data: false
load_validation_data: true
n_training_examples: null # null loads all training examples; set a small number for faster loading
n_test_examples: null # null loads all test examples
n_validation_examples: null # null loads all validation examples
test_file_name: "test.csv"
is_data_preprocessed: false # The data file is organized as (original text | text | diacritics)
data_separator: '|' # required if the data is already preprocessed
diacritics_separator: '*' # required if the data is already preprocessed
text_encoder: ArabicEncoderWithStartSymbol
text_cleaner: valid_arabic_cleaners # a whitelist cleaner that keeps only Arabic letters, punctuation, and spaces
max_len: 600 # sentences longer than this will not be used
max_sen_len: null
max_steps: 10000
learning_rate: 0.001
batch_size: 32
adam_beta1: 0.9
adam_beta2: 0.999
use_decay: true
weight_decay: 0.0
embedding_dim: 256
use_prenet: false
prenet_sizes: [512, 256]
cbhg_projections: [128, 256]
cbhg_filters: 16
cbhg_gru_units: 256
post_cbhg_layers_units: [256, 256]
post_cbhg_use_batch_norm: true
use_mixed_precision: false
optimizer_type: Adam
device: cpu
# LOGGING
evaluate_frequency: 50000000
max_eval_batches: 100
evaluate_with_error_rates_frequency: 1000
n_predicted_text_tensorboard: 10 # number of predicted texts written to TensorBoard
model_save_frequency: 5000
train_plotting_frequency: 50000000 # No plotting for this model
n_steps_avg_losses: [100, 500, 1_000, 5_000] # command-line display of average loss over the last n steps
error_rates_n_batches: 10000 # if calculating error rates is slow, limit the number of batches used
test_model_path: null # null loads the last saved model
train_resume_model_path: null # null loads the last saved model
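
A minimal sketch of loading this configuration, assuming it is saved as config.yml and read with PyYAML; the helper name load_config is hypothetical and not part of the repository:

import yaml

def load_config(path="config.yml"):
    # Parse the YAML file above into a plain Python dict.
    with open(path, encoding="utf-8") as f:
        return yaml.safe_load(f)

if __name__ == "__main__":
    config = load_config()
    # Spot-check a few of the hyperparameters defined above.
    print(config["session_name"])   # base
    print(config["batch_size"])     # 32
    print(config["learning_rate"])  # 0.001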