fracapuano committed on
Commit
55ff87e
·
verified ·
1 Parent(s): 65f400e

Add EEGViT config

Browse files
Files changed (2) hide show
  1. config.json +27 -14
  2. configuration_eegvit.py +50 -0
config.json CHANGED
@@ -1,25 +1,38 @@
1
  {
2
- "architectures": [
3
- "EEGViTAutoModel"
4
- ],
5
- "attention_probs_dropout_prob": 0.0,
6
  "auto_map": {
7
- "AutoModel": "eegvit_model.EEGViTAutoModel"
8
  },
9
- "encoder_stride": 16,
10
- "hidden_act": "gelu",
11
- "hidden_dropout_prob": 0.0,
 
 
 
 
 
 
 
 
 
 
 
 
12
  "hidden_size": 768,
13
- "image_size": 224,
 
 
 
14
  "initializer_range": 0.02,
15
  "intermediate_size": 3072,
16
  "layer_norm_eps": 1e-12,
17
- "model_type": "vit",
18
  "num_attention_heads": 12,
19
- "num_channels": 3,
20
  "num_hidden_layers": 12,
21
- "patch_size": 16,
22
- "qkv_bias": true,
23
- "torch_dtype": "float32",
 
24
  "transformers_version": "4.46.1"
25
  }
 
1
  {
2
+ "attention_probs_dropout_prob": 0.1,
 
 
 
3
  "auto_map": {
4
+ "AutoConfig": "configuration_eegvit.EEGViTConfig"
5
  },
6
+ "classifier_dropout": 0.1,
7
+ "conv1_kernel_size": [
8
+ 1,
9
+ 36
10
+ ],
11
+ "conv1_out_channels": 256,
12
+ "conv1_padding": [
13
+ 0,
14
+ 2
15
+ ],
16
+ "conv1_stride": [
17
+ 1,
18
+ 36
19
+ ],
20
+ "hidden_dropout_prob": 0.1,
21
  "hidden_size": 768,
22
+ "image_size": [
23
+ 129,
24
+ 14
25
+ ],
26
  "initializer_range": 0.02,
27
  "intermediate_size": 3072,
28
  "layer_norm_eps": 1e-12,
29
+ "model_type": "eegvit",
30
  "num_attention_heads": 12,
31
+ "num_channels": 256,
32
  "num_hidden_layers": 12,
33
+ "patch_size": [
34
+ 8,
35
+ 1
36
+ ],
37
  "transformers_version": "4.46.1"
38
  }
configuration_eegvit.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import PretrainedConfig
2
+
3
class EEGViTConfig(PretrainedConfig):
    """Configuration class for the EEGViT model.

    Combines the parameters of a ViT-style transformer encoder with the
    parameters of an initial Conv2d stem (``conv1_*``) that — presumably —
    maps raw EEG input (``image_size`` of (129, 14), i.e. electrodes x time
    bins; TODO confirm against the model file) into ``num_channels`` feature
    maps before patch embedding.

    Args:
        conv1_out_channels (int): Output channels of the conv stem.
        conv1_kernel_size (tuple[int, int]): Kernel size of the conv stem.
        conv1_stride (tuple[int, int]): Stride of the conv stem.
        conv1_padding (tuple[int, int]): Padding of the conv stem.
        num_channels (int): Input channels expected by the ViT patch
            embedding (matches ``conv1_out_channels`` by default).
        image_size (tuple[int, int]): Height/width of the ViT input.
        patch_size (tuple[int, int]): Height/width of each patch.
        hidden_size (int): Transformer embedding dimension.
        num_hidden_layers (int): Number of transformer layers.
        num_attention_heads (int): Attention heads per layer.
        intermediate_size (int): Feed-forward hidden dimension.
        hidden_dropout_prob (float): Dropout on embeddings/hidden states.
        attention_probs_dropout_prob (float): Dropout on attention weights.
        initializer_range (float): Std of weight initialization.
        layer_norm_eps (float): Epsilon used by LayerNorm layers.
        classifier_dropout (float): Dropout before the classifier head.
        num_labels (int): Number of output labels; forwarded to
            ``PretrainedConfig`` (see note below).
        **kwargs: Forwarded to ``PretrainedConfig`` (``id2label``,
            ``label2id``, etc.).
    """

    model_type = "eegvit"

    def __init__(
        self,
        conv1_out_channels=256,
        conv1_kernel_size=(1, 36),
        conv1_stride=(1, 36),
        conv1_padding=(0, 2),
        num_channels=256,
        image_size=(129, 14),
        patch_size=(8, 1),
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        num_labels=2,
        **kwargs,
    ):
        # Forward num_labels into PretrainedConfig so it can be reconciled
        # with any id2label/label2id passed in kwargs. Assigning
        # `self.num_labels = num_labels` AFTER super().__init__() (the
        # previous behavior) goes through the num_labels property setter,
        # which rebuilds id2label with generic "LABEL_i" names and thereby
        # clobbers custom label maps loaded from a saved config.
        super().__init__(num_labels=num_labels, **kwargs)

        # Conv1 stem settings
        self.conv1_out_channels = conv1_out_channels
        self.conv1_kernel_size = conv1_kernel_size
        self.conv1_stride = conv1_stride
        self.conv1_padding = conv1_padding

        # ViT encoder settings
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Classifier head settings
        self.classifier_dropout = classifier_dropout