Uploading MVAE in asenella/mmnist_MVAEconfig2_seed_0_ratio_00_c
- README.md +13 -0
- decoders.pkl +3 -0
- encoders.pkl +3 -0
- model.pt +3 -0
- model_config.json +1 -0
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+language: en
+tags:
+- multivae
+license: apache-2.0
+---
+
+### Downloading this model from the Hub
+This model was trained with multivae. It can be downloaded or reloaded using the method `load_from_hf_hub`
+```python
+>>> from multivae.models import AutoModel
+>>> model = AutoModel.load_from_hf_hub(hf_hub_path="your_hf_username/repo_name")
+```
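This repository also stores pickled custom architectures (encoders.pkl and decoders.pkl, listed in `custom_architectures` in model_config.json), so reloading typically means explicitly allowing pickle deserialization. A minimal sketch, assuming an `allow_pickle` keyword on multivae's `load_from_hf_hub` and using the repo id from the commit title; only enable pickle loading for repositories you trust:

```python
>>> from multivae.models import AutoModel

>>> # encoders.pkl / decoders.pkl are pickled custom modules, so pickle
>>> # loading has to be opted into (assumed keyword: allow_pickle).
>>> model = AutoModel.load_from_hf_hub(
...     hf_hub_path="asenella/mmnist_MVAEconfig2_seed_0_ratio_00_c",
...     allow_pickle=True,
... )
```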
decoders.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ac0ba96dcfb47564060330bd60c0123484fce7edfafb7a90d28a1dca5a153b1
+size 22915318
encoders.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b5a8f4ae890fa3719a67264abe0d52795f1202911671a027016ce8e9e8dc84b
+size 43873119
model.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd6cbafedc894b932d7005acf6e6ff11f6de1e3d737bcef539a94d2d64f5c971
+size 66731231
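The .pkl and .pt entries above are Git LFS pointer files: the commit tracks only the spec version, the SHA-256 of the real payload, and its byte size, while the weights themselves sit in LFS storage. A minimal sketch of fetching the resolved files directly, assuming the standard `huggingface_hub` client and the repo id from the commit title:

```python
>>> from huggingface_hub import hf_hub_download

>>> # Downloads the resolved LFS payloads, not the 3-line pointer files.
>>> weights_path = hf_hub_download(
...     repo_id="asenella/mmnist_MVAEconfig2_seed_0_ratio_00_c",
...     filename="model.pt",
... )
>>> config_path = hf_hub_download(
...     repo_id="asenella/mmnist_MVAEconfig2_seed_0_ratio_00_c",
...     filename="model_config.json",
... )
```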
model_config.json
ADDED
@@ -0,0 +1 @@
+{"name": "MVAEConfig", "n_modalities": 5, "latent_dim": 512, "input_dims": {"m0": [3, 28, 28], "m1": [3, 28, 28], "m2": [3, 28, 28], "m3": [3, 28, 28], "m4": [3, 28, 28]}, "uses_likelihood_rescaling": false, "rescale_factors": null, "decoders_dist": {"m0": "laplace", "m1": "laplace", "m2": "laplace", "m3": "laplace", "m4": "laplace"}, "decoder_dist_params": {"m0": {"scale": 0.75}, "m1": {"scale": 0.75}, "m2": {"scale": 0.75}, "m3": {"scale": 0.75}, "m4": {"scale": 0.75}}, "custom_architectures": ["encoders", "decoders"], "use_subsampling": true, "k": 0, "warmup": 0, "beta": 1.0}
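The configuration describes an MVAE over 5 MMNIST modalities (3×28×28 inputs) with a 512-dimensional latent space, Laplace decoder distributions at scale 0.75, and pickled custom encoders/decoders. A minimal sketch of inspecting a local copy of this file with the standard library, assuming the file name matches the repo layout:

```python
>>> import json

>>> # Parse the uploaded configuration and report the key hyperparameters.
>>> with open("model_config.json") as f:
...     cfg = json.load(f)
>>> cfg["name"], cfg["n_modalities"], cfg["latent_dim"]
('MVAEConfig', 5, 512)
>>> # One Laplace decoder per modality, all with a fixed scale of 0.75.
>>> {m: (cfg["decoders_dist"][m], cfg["decoder_dist_params"][m]["scale"])
...  for m in cfg["decoders_dist"]}
{'m0': ('laplace', 0.75), 'm1': ('laplace', 0.75), 'm2': ('laplace', 0.75), 'm3': ('laplace', 0.75), 'm4': ('laplace', 0.75)}
```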