Uploading JMVAE in asenella/reproduce_jmvae_seed_2
- README.md +13 -0
- decoders.pkl +3 -0
- encoders.pkl +3 -0
- joint_encoder.pkl +3 -0
- model.pt +3 -0
- model_config.json +1 -0
README.md
ADDED
@@ -0,0 +1,13 @@
+ ---
+ language: en
+ tags:
+ - multivae
+ license: apache-2.0
+ ---
+
+ ### Downloading this model from the Hub
+ This model was trained with multivae. It can be downloaded or reloaded using the method `load_from_hf_hub`:
+ ```python
+ >>> from multivae.models import AutoModel
+ >>> model = AutoModel.load_from_hf_hub(hf_hub_path="your_hf_username/repo_name")
+ ```
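For this particular upload, the same call can point at the repository named in the commit title. A minimal sketch, assuming the `multivae` package is installed and `asenella/reproduce_jmvae_seed_2` is publicly readable (the `hf_hub_path` keyword is taken from the README snippet above):

```python
# Minimal sketch: reload this exact upload instead of the placeholder repo name.
from multivae.models import AutoModel

model = AutoModel.load_from_hf_hub(
    hf_hub_path="asenella/reproduce_jmvae_seed_2"  # repository named in the commit title
)
print(type(model))  # expected to be the JMVAE class reconstructed from model_config.json
```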
decoders.pkl
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b5ca587432a17ba75584dde991d1a88db9c6620683c907fcd93302178658f02
+ size 4020770
encoders.pkl
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0429d0f846f7c6ae90bc61ba442dbaad92e4b7269fee5e53bace2c36a4aebc0
+ size 4283434
joint_encoder.pkl
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c58485d51513cb92c5445ce9d7797fc6ef6366cbf859826ce7f63ec6c95e56e
+ size 4008176
model.pt
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2dff96c0ba7d042929a7d8951169dcb1d994954e5ba5b05acfc9acf880621a8
+ size 12257571
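The `.pkl` and `.pt` entries above are Git LFS pointer files: each stores only the sha256 (`oid`) and byte `size` of the real binary, which is fetched from LFS storage when the repo is downloaded. After downloading, a local file can be checked against its pointer. A minimal sketch using only the standard library; the local path below is hypothetical and the expected hash is the `oid` from the `model.pt` pointer:

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex sha256, the value recorded as `oid` in an LFS pointer."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local checkout; adjust to wherever the files were downloaded.
local = Path("reproduce_jmvae_seed_2/model.pt")
expected = "c2dff96c0ba7d042929a7d8951169dcb1d994954e5ba5b05acfc9acf880621a8"
assert sha256_of(local) == expected, "downloaded file does not match the LFS pointer"
```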
model_config.json
ADDED
@@ -0,0 +1 @@
+ {"name": "JMVAEConfig", "n_modalities": 2, "latent_dim": 64, "input_dims": {"images": [1, 28, 28], "labels": [1]}, "uses_likelihood_rescaling": false, "rescale_factors": null, "decoders_dist": {"images": "bernoulli", "labels": "categorical"}, "decoder_dist_params": {}, "custom_architectures": ["encoders", "decoders", "joint_encoder"], "alpha": 0.1, "warmup": 200}
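`model_config.json` records the hyperparameters this JMVAE was trained with: two modalities (`images` of shape 1×28×28 and scalar `labels`), a 64-dimensional latent space, Bernoulli and categorical decoder likelihoods, the JMVAE-specific `alpha` of 0.1, a warmup of 200, and the list of custom architectures stored in the pickled files above. A minimal sketch of building an equivalent config in code, assuming `multivae` exposes `JMVAEConfig` with fields mirroring these JSON keys (field names are copied from the JSON, not verified against the installed library version):

```python
# Sketch only: keyword names mirror the JSON keys above.
from multivae.models import JMVAE, JMVAEConfig

config = JMVAEConfig(
    n_modalities=2,
    latent_dim=64,
    input_dims={"images": (1, 28, 28), "labels": (1,)},
    decoders_dist={"images": "bernoulli", "labels": "categorical"},
    alpha=0.1,   # weight of the joint/unimodal KL regularization in the JMVAE objective
    warmup=200,  # annealing period used during training
)

# The custom encoders, decoders and joint encoder (the pickled files above)
# would be passed alongside the config when reproducing this exact repo.
model = JMVAE(config)
```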