yzhang511 committed
Commit 60d6df3
1 Parent(s): ef6ff47

Upload config.yaml

Files changed (1)
  1. config.yaml +61 -0
config.yaml ADDED
@@ -0,0 +1,61 @@
+ model_class: STPatch  # NDT2 is a sub-class of STPatch
+
+
+ encoder:
+
+   stitching: false
+   from_pt: null
+   embed_region: false
+
+   masker:
+     force_active: true
+     mode: all
+     ratio: 0.3 # ratio of the data to predict
+     zero_ratio: 1.0 # of the data to predict, the fraction zeroed out
+     random_ratio: 1.0 # of the data not zeroed out, the fraction randomly replaced
+     expand_prob: 0.0 # probability of expanding the mask in "temporal" mode
+     max_timespan: 1 # max span of the mask if expanded
+     channels: null # neurons to mask in "co-smoothing" mode
+     timesteps: null # time steps to mask in "forward-pred" mode
+     mask_regions: ['all'] # brain regions to mask in "inter-region" mode
+     target_regions: ['all'] # brain regions to predict in "intra-region" mode
+     n_mask_regions: 1 # number of regions to choose from mask_regions or target_regions
+
+   patcher:
+     active: true
+     time_stride: 0
+
+   # context available for each timestep
+   context:
+     forward: -1
+     backward: -1
+
+   embedder:
+     n_neurons: 1280
+     n_timesteps: 100
+     max_time_F: 1
+     max_space_F: 128
+     max_spikes: 0 # max number of spikes in a single time bin
+     mode: linear # linear/embed/identity
+     mult: 2 # embedding multiplier: hidden_size = n_channels * mult
+     act: softsign # activation for the embedding layers
+     scale: 1 # multiply the embedding by this number
+     bias: true # use bias in the embedding layer
+     dropout: 0.2 # dropout in the embedding layer
+     use_prompt: true
+     use_session: true
+
+
+   transformer:
+     n_layers: 5 # number of transformer layers
+     hidden_size: 128 # hidden size of the transformer
+     n_heads: 8 # number of attention heads
+     attention_bias: true # learn bias in the attention layers
+     act: gelu # activation function in the MLP layers
+     inter_size: 512 # intermediate dimension in the MLP layers
+     mlp_bias: true # learn bias in the MLP layers
+     dropout: 0.4 # dropout in transformer layers
+     fixup_init: true # modify weight initialization
+
+ decoder:
+   from_pt: null
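
As a rough illustration (not part of the commit): the uploaded config.yaml can be read back with PyYAML, and the masker ratios compose as described by the in-file comments. The key paths below assume the nesting shown above (masker, embedder, and transformer under encoder), and n_tokens is a hypothetical count used only for the arithmetic; adjust both if the repository lays the file out differently.

# Minimal sketch: parse the uploaded config and inspect a few fields.
import yaml

with open("config.yaml") as f:  # path assumed; use the repo's actual location
    cfg = yaml.safe_load(f)

print(cfg["model_class"])          # STPatch
masker = cfg["encoder"]["masker"]  # assumes masker is nested under encoder

# How the masking ratios compose, per the comments in the file:
# `ratio` picks the fraction of tokens to predict, `zero_ratio` zeroes that
# fraction of the picked tokens, and `random_ratio` applies to the remainder.
n_tokens = 1000                                                 # hypothetical example
n_picked = int(masker["ratio"] * n_tokens)                      # 300 tokens to predict
n_zeroed = int(masker["zero_ratio"] * n_picked)                 # all 300 zeroed out
n_random = int(masker["random_ratio"] * (n_picked - n_zeroed))  # 0 left to replace
print(n_picked, n_zeroed, n_random)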