Text Generation
KerasHub
Keras
Divyasreepat committed on
Commit
83c3441
1 Parent(s): 1d80378

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ assets/tokenizer/vocabulary.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: keras-hub
3
+ ---
4
+ This is a [`Bloom` model](https://keras.io/api/keras_hub/models/bloom) uploaded using the KerasHub library and can be used with JAX, TensorFlow, and PyTorch backends.
5
+ Model config:
6
+ * **name:** bloom_backbone
7
+ * **trainable:** True
8
+ * **vocabulary_size:** 250880
9
+ * **num_layers:** 24
10
+ * **num_heads:** 16
11
+ * **hidden_dim:** 1536
12
+ * **intermediate_dim:** 6144
13
+ * **dropout:** 0.0
14
+ * **layer_norm_epsilon:** 1e-05
15
+
16
+ This model card has been generated automatically and should be completed by the model author. See [Model Cards documentation](https://huggingface.co/docs/hub/model-cards) for more information.
assets/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
assets/tokenizer/vocabulary.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ff6ac5e94603046dd127cc00bf9b3e45a6ea15b97ccc59e7c8c621e897a746e
3
+ size 12497274
config.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "module": "keras_nlp.src.models.bloom.bloom_backbone",
3
+ "class_name": "BloomBackbone",
4
+ "config": {
5
+ "name": "bloom_backbone",
6
+ "trainable": true,
7
+ "vocabulary_size": 250880,
8
+ "num_layers": 24,
9
+ "num_heads": 16,
10
+ "hidden_dim": 1536,
11
+ "intermediate_dim": 6144,
12
+ "dropout": 0.0,
13
+ "layer_norm_epsilon": 1e-05
14
+ },
15
+ "registered_name": "keras_nlp>BloomBackbone",
16
+ "assets": [],
17
+ "weights": "model.weights.h5"
18
+ }
metadata.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "keras_version": "3.0.5",
3
+ "keras_nlp_version": "0.8.0",
4
+ "parameter_count": 1065314304,
5
+ "date_saved": "2024-02-27@23:06:51"
6
+ }
model.weights.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6f6a7ed8fe365dcd04008352a4623eb04801934c499d620828c8df064a52f06
3
+ size 2131409344
tokenizer.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "module": "keras_nlp.src.models.bloom.bloom_tokenizer",
3
+ "class_name": "BloomTokenizer",
4
+ "config": {
5
+ "name": "bloom_tokenizer",
6
+ "trainable": true,
7
+ "dtype": "int32",
8
+ "sequence_length": null,
9
+ "add_prefix_space": false
10
+ },
11
+ "registered_name": "keras_nlp>BloomTokenizer",
12
+ "assets": [
13
+ "assets/tokenizer/vocabulary.json",
14
+ "assets/tokenizer/merges.txt"
15
+ ],
16
+ "weights": null
17
+ }