KeeeeepGoing committed on
Commit • e348c58
1 Parent(s): 4c80f1b

Upload 7 files

Files changed:
- README.md +49 -3
- config.json +1 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +30 -0
- test_metrics.json +1 -0
- tokenizer_config.json +44 -0
- vocab.txt +75 -0
README.md
CHANGED
@@ -1,3 +1,49 @@
- ---
- license: cc-by-nc-sa-4.0
- ---
+ ---
+ license: cc-by-nc-sa-4.0
+ widget:
+ - text: AAAACATAATAATTTGCCGACTTACTCACCCTGTGATTAATCTATTTTCACTGTGTAGTAAGTAGAGAGTGTTACTTACTACAGTATCTATTTTTGTTTGGATGTTTGCCGTGGACAAGTGCTAACTGTCAAAACCCGTTTTGACCTTAAACCCAGCAATAATAATAATGTAAAACTCCATTGGGCAGTGCAACCTACTCCTCACATATTATATTATAATTCCTAAACCTTGATCAGTTAAATTAATAGCTCTGTTCCCTGTGGCTTTATATAAACACCATGGTTGTCAGCAGTTCAGCA
+ tags:
+ - DNA
+ - biology
+ - genomics
+ ---
+ # Plant foundation DNA large language models
+
+ The plant DNA large language models (LLMs) contain a series of foundation models based on different model architectures, which are pre-trained on various plant reference genomes.
+ All the models have a comparable size, between 90 MB and 150 MB; a BPE tokenizer is used for tokenization, and 8,000 tokens are included in the vocabulary.
+
+
+ **Developed by:** zhangtaolab
+
+ ### Model Sources
+
+ - **Repository:** [Plant DNA LLMs](https://github.com/zhangtaolab/plant_DNA_LLMs)
+ - **Manuscript:** [Versatile applications of foundation DNA large language models in plant genomes]()
+
+ ### Architecture
+
+ The model is trained based on the Mamba-130m state space model with a modified tokenizer specific to DNA sequences.
+
+ This model is fine-tuned for predicting active core promoters.
+
+ ### How to use
+
+ Install the runtime libraries first:
+ ```bash
+ pip install transformers
+ pip install 'causal-conv1d<=1.2.0'
+ pip install 'mamba-ssm<2.0.0'
+ ```
+
+ Since the `transformers` library (versions < 4.43.0) does not provide a `MambaForSequenceClassification` class, we wrote a custom script to train the Mamba model for sequence classification.
+ Inference code can be found in our [GitHub repository](https://github.com/zhangtaolab/plant_DNA_LLMs).
+ Note that the Plant DNAMamba model requires an NVIDIA GPU to run.
+
+
+ ### Training data
+ We use a custom `MambaForSequenceClassification` script to fine-tune the model.
+ The detailed training procedure can be found in our manuscript.
+
+
+ #### Hardware
+ The model was trained on an NVIDIA RTX 4090 GPU (24 GB).
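
The "How to use" section above stops at installing the runtime libraries. As an editorial illustration (not part of this commit), the sketch below loads the tokenizer files uploaded in this commit with `transformers`; the local path and the non-overlapping 3-mer splitting are assumptions, and the authoritative preprocessing and custom Mamba sequence-classification code are in the [plant_DNA_LLMs](https://github.com/zhangtaolab/plant_DNA_LLMs) repository.

```python
from transformers import AutoTokenizer

# Hypothetical local path: a directory containing the tokenizer_config.json,
# special_tokens_map.json and vocab.txt uploaded in this commit
# (tokenizer_config.json declares EsmTokenizer with a 3-mer vocabulary).
tokenizer = AutoTokenizer.from_pretrained("path/to/this/model")

# Assumption for illustration only: split the DNA sequence into
# whitespace-separated, non-overlapping 3-mers so tokens match vocab.txt.
seq = "AAAACATAATAATTTGCCGACTTACTCACC"
kmers = " ".join(seq[i:i + 3] for i in range(0, len(seq) - 2, 3))

enc = tokenizer(kmers, return_tensors="pt")
print(enc["input_ids"])  # ids drawn from the 75-token vocabulary
```

Classification itself needs the custom `MambaForSequenceClassification` head from that repository; `pytorch_model.bin` below holds the fine-tuned weights.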
config.json
ADDED
@@ -0,0 +1 @@
+ {"d_model": 768, "n_layer": 24, "vocab_size": 75, "ssm_cfg": {}, "rms_norm": true, "residual_in_fp32": true, "fused_add_norm": true, "pad_vocab_size_multiple": 1, "tie_embeddings": true}
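
These fields appear to follow the `MambaConfig` dataclass shipped with the `mamba-ssm` package pinned in the README above (hidden size 768, 24 layers, a 75-token vocabulary). A minimal sketch, assuming `mamba-ssm` < 2.0.0 exposes the config at this import path:

```python
import json

# Assumption: mamba-ssm (< 2.0.0, as pinned in the README) provides this
# dataclass, whose field names match the keys of the uploaded config.json.
from mamba_ssm.models.config_mamba import MambaConfig

with open("config.json") as f:
    cfg = MambaConfig(**json.load(f))

print(cfg.d_model, cfg.n_layer, cfg.vocab_size)  # 768 24 75
```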
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:554acbd07c94044fc7e4cfd44128aeb0f251aace8e42c64f470e342deddce804
+ size 362410522
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
+ {
+   "cls_token": {
+     "content": "<cls>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
test_metrics.json
ADDED
@@ -0,0 +1 @@
+ {'test_loss': 0.4215187430381775, 'test_accuracy': 0.8085336538461538, 'test_f1': 0.8130939809926082, 'test_precision': 0.7941783176713271, 'test_recall': 0.8329326923076923, 'test_matthews_correlation': 0.6178033189011165, 'test_runtime': 26.083, 'test_samples_per_second': 318.981, 'test_steps_per_second': 19.936}
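
Note that the metrics file is written as a Python dict repr (single-quoted keys) rather than strict JSON, so `json.load` will reject it; a small reading sketch:

```python
import ast

# test_metrics.json uses single quotes (a Python dict literal, not strict
# JSON), so parse it with ast.literal_eval instead of json.load.
with open("test_metrics.json") as f:
    metrics = ast.literal_eval(f.read())

print(f"accuracy={metrics['test_accuracy']:.4f}, "
      f"F1={metrics['test_f1']:.4f}, "
      f"MCC={metrics['test_matthews_correlation']:.4f}")
```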
tokenizer_config.json
ADDED
@@ -0,0 +1,44 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<mask>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<cls>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<cls>",
+   "eos_token": null,
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "tokenizer_class": "EsmTokenizer",
+   "unk_token": "<unk>"
+ }
vocab.txt
ADDED
@@ -0,0 +1,75 @@
+ <unk>
+ <pad>
+ <mask>
+ <cls>
+ AAA
+ AAT
+ AAC
+ AAG
+ ATA
+ ATT
+ ATC
+ ATG
+ ACA
+ ACT
+ ACC
+ ACG
+ AGA
+ AGT
+ AGC
+ AGG
+ TAA
+ TAT
+ TAC
+ TAG
+ TTA
+ TTT
+ TTC
+ TTG
+ TCA
+ TCT
+ TCC
+ TCG
+ TGA
+ TGT
+ TGC
+ TGG
+ CAA
+ CAT
+ CAC
+ CAG
+ CTA
+ CTT
+ CTC
+ CTG
+ CCA
+ CCT
+ CCC
+ CCG
+ CGA
+ CGT
+ CGC
+ CGG
+ GAA
+ GAT
+ GAC
+ GAG
+ GTA
+ GTT
+ GTC
+ GTG
+ GCA
+ GCT
+ GCC
+ GCG
+ GGA
+ GGT
+ GGC
+ GGG
+ A
+ T
+ C
+ G
+ N
+ <eos>
+ <bos>
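
As a quick consistency check (illustrative only): the vocabulary holds 4 special tokens, all 64 DNA 3-mers, the single characters A/T/C/G/N, and `<eos>`/`<bos>`, i.e. 75 entries, matching `vocab_size` in config.json.

```python
import json

# Count the entries in vocab.txt and compare with vocab_size in config.json.
with open("vocab.txt") as f:
    vocab = [line.strip() for line in f if line.strip()]

with open("config.json") as f:
    vocab_size = json.load(f)["vocab_size"]

assert len(vocab) == vocab_size == 75
print(len(vocab), "tokens")
```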