activations: Tanh
batch_size: 8
class_identifier: referenceless_regression_metric
dropout: 0.1
encoder_learning_rate: 1.0e-06
encoder_model: XLM-RoBERTa
final_activation: null
hidden_sizes:
- 2048
- 1024
keep_embeddings_frozen: true
layer: mix
layer_norm: false
layer_transformation: sparsemax
layerwise_decay: 0.95
learning_rate: 1.5e-05
load_pretrained_weights: true
loss: mse
nr_frozen_epochs: 0.3
optimizer: AdamW
pool: avg
pretrained_model: xlm-roberta-large
train_data:
- /content/drive/MyDrive/WMT QE Shared Task 2023/data/train.enmr.augmented.1.csv
validation_data:
- /content/drive/MyDrive/WMT QE Shared Task 2023/data/dev.enmr.df.short.new.csv
warmup_steps: 0
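
For reference, a minimal Python sketch that loads this config and sanity-checks a few of the key hyperparameters before launching training (configs in this layout are typically passed to the unbabel-comet toolkit, e.g. via its comet-train --cfg entry point). The file name referenceless_regression_enmr.yaml is an assumption; adjust it to wherever the config above is saved.

    import yaml

    # Hypothetical file name: save the YAML config above under this path.
    CFG_PATH = "referenceless_regression_enmr.yaml"

    with open(CFG_PATH, encoding="utf-8") as f:
        cfg = yaml.safe_load(f)

    # Verify the model class and backbone match what the config declares.
    assert cfg["class_identifier"] == "referenceless_regression_metric"
    assert cfg["pretrained_model"] == "xlm-roberta-large"
    assert cfg["hidden_sizes"] == [2048, 1024]

    # The encoder uses a much smaller learning rate (1e-6) than the
    # regression head (1.5e-5), with 0.95 layerwise decay across layers.
    print(f"encoder lr: {cfg['encoder_learning_rate']:g}, "
          f"head lr: {cfg['learning_rate']:g}")

    # Confirm the train/validation CSV paths are present and non-empty lists.
    for key in ("train_data", "validation_data"):
        assert isinstance(cfg[key], list) and cfg[key], f"missing {key}"

Checks like these catch a mistyped field or a wrong backbone before GPU time is spent; the actual training loop is driven entirely by the toolkit from the same YAML.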