Update checkpoint for transformers>=4.29
Following the merge of [a PR](https://github.com/huggingface/transformers/pull/24310) in `transformers`, it appeared that this model was not properly converted. This PR fixes the inference; it was tested with the following script:
```python
>>> from transformers import AutoTokenizer, MarianMTModel
>>> tokenizer = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-tc-big-gmq-zlw')
>>> inputs = tokenizer("' >>en<< Hey how are you?'", return_tensors="pt", padding=True)
>>> model = MarianMTModel.from_pretrained('Helsinki-NLP/opus-mt-tc-big-gmq-zlw')
>>> print(tokenizer.batch_decode(model.generate(**inputs)))
['<pad> "Hho how how are you?" "Hho how a are you? "? "? "ho ho ho a a a a you you??? "? "? "???? " "ho ho a a you? "????????? "? "?'ho ho ho " " a " " " " " " "? " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " " "</s>']
```
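Once the updated weights are merged, the checkpoint can be exercised end to end. The snippet below is a minimal sketch rather than part of the original test script: it assumes the `use_safetensors=True` flag of `from_pretrained` is available in your `transformers` version, and that the model accepts a `>>pol<<` target-language prefix as multilingual OPUS-MT models usually do; the Danish example sentence is my own.

```python
from transformers import AutoTokenizer, MarianMTModel

model_id = "Helsinki-NLP/opus-mt-tc-big-gmq-zlw"

# Load from the new model.safetensors weights added by this PR
# instead of the legacy pytorch_model.bin.
model = MarianMTModel.from_pretrained(model_id, use_safetensors=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Danish -> Polish; the ">>pol<<" prefix selects the target language
# (assumed target token, following the usual OPUS-MT convention).
inputs = tokenizer(">>pol<< Hej, hvordan har du det?", return_tensors="pt", padding=True)
generated = model.generate(**inputs)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```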
Files changed:
- .gitattributes +1 -0
- config.json +1 -2
- generation_config.json +1 -1
- model.safetensors +3 -0
- pytorch_model.bin +2 -2
```diff
--- a/.gitattributes
+++ b/.gitattributes
@@ -30,3 +30,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 *.spm filter=lfs diff=lfs merge=lfs -text
+model.safetensors filter=lfs diff=lfs merge=lfs -text
```
```diff
--- a/config.json
+++ b/config.json
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "/tmp/Helsinki-NLP/opus-mt-tc-big-gmq-zlw",
   "activation_dropout": 0.0,
   "activation_function": "relu",
   "architectures": [
@@ -40,7 +39,7 @@
   "share_encoder_decoder_embeddings": true,
   "static_position_embeddings": true,
   "torch_dtype": "float16",
-  "transformers_version": "4.
+  "transformers_version": "4.34.0.dev0",
   "use_cache": true,
   "vocab_size": 58521
 }
```
```diff
--- a/generation_config.json
+++ b/generation_config.json
@@ -12,5 +12,5 @@
   "num_beams": 4,
   "pad_token_id": 58520,
   "renormalize_logits": true,
-  "transformers_version": "4.
+  "transformers_version": "4.34.0.dev0"
 }
```
```diff
--- /dev/null
+++ b/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f440ba3cae9ca1f435d532931a09826533e9071ed82a18cdc6ff54ea8a29a18e
+size 472712330
```
```diff
--- a/pytorch_model.bin
+++ b/pytorch_model.bin
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:fa346919959fa144a3abd63edd82574e9e859a87711a0edcbaccff21b3d264b2
+size 472769285
```