Upload model
- README.md +1 -1
- config.json +2 -2
- dataset.py +0 -2
- generation_config.json +1 -1
- modelling_cxrmate_ed.py +0 -1
README.md CHANGED

```diff
@@ -1,8 +1,8 @@
 ---
 language:
 - en
-license: apache-2.0
 library_name: transformers
+license: apache-2.0
 tags:
 - chest X-ray report generation
 - radiology report generation
```
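The README edit only reorders the YAML front matter (`license` now sits below `library_name`); the parsed metadata is unchanged. A minimal sketch of reading it back, assuming the repo id `aehrc/cxrmate-ed` (illustrative, substitute the real one):

```python
from huggingface_hub import ModelCard

# Repo id is an assumption for illustration.
card = ModelCard.load("aehrc/cxrmate-ed")

# Key order inside the YAML block does not affect the parsed card data.
print(card.data.license)       # "apache-2.0"
print(card.data.library_name)  # "transformers"
print(card.data.tags)          # ["chest X-ray report generation", ...]
```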
config.json CHANGED

```diff
@@ -61,6 +61,7 @@
       "history"
     ],
     "min_length": 0,
+    "mlp_bias": false,
     "model_type": "llama",
     "no_repeat_ngram_size": 0,
     "num_attention_heads": 12,
@@ -234,9 +235,8 @@
     "typical_p": 1.0,
     "use_bfloat16": false
   },
-  "is_encoder_decoder": true,
   "model_type": "vision-encoder-decoder",
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.42.3"
 }
```
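The config change follows a `transformers` version bump: `mlp_bias` is a field that recent releases serialize for LLaMA configs (default `false`), the top-level `"is_encoder_decoder": true` entry is removed, and `transformers_version` is pinned at 4.42.3. A minimal sketch of inspecting the nested config, assuming the repo id and that the LLaMA text decoder is exposed as `config.decoder`:

```python
from transformers import AutoConfig

# Repo id and the `decoder` attribute path are assumptions; trust_remote_code
# is needed because the model ships custom modelling code.
config = AutoConfig.from_pretrained("aehrc/cxrmate-ed", trust_remote_code=True)

print(config.model_type)          # "vision-encoder-decoder"
print(config.decoder.model_type)  # "llama"
print(config.decoder.mlp_bias)    # False, the newly serialized field
```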
dataset.py CHANGED

```diff
@@ -8,8 +8,6 @@ import torch
 from torch.utils.data import Dataset
 from torchvision.io import decode_image, read_image
 
-from data.mimic_cxr.dcm_processing import load_and_preprocess_dcm_uint16
-from tools.mimic_iv.ed_cxr.records import EDCXRSubjectRecords
 from tools.utils import mimic_cxr_image_path
 
 # Ordered by oblique, lateral, AP, and then PA views so that PA views are closest in position to the generated tokens (and oblique is furtherest).
```
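The two deleted imports reference project-local training utilities (`data.mimic_cxr...`, `tools.mimic_iv...`) that are not part of the uploaded repo, so removing them lets `dataset.py` import on its own. One way to catch such dead imports, as a self-contained sketch:

```python
import ast
import sys

# Rough unused-import finder: report imported names that never appear as a
# Name node in the rest of the module. A sketch only; it misses names used
# in strings, type comments, or re-exports.
tree = ast.parse(open(sys.argv[1]).read())

imported = set()
for node in ast.walk(tree):
    if isinstance(node, ast.Import):
        imported |= {a.asname or a.name.split(".")[0] for a in node.names}
    elif isinstance(node, ast.ImportFrom):
        imported |= {a.asname or a.name for a in node.names}

used = {n.id for n in ast.walk(tree) if isinstance(n, ast.Name)}
print("possibly unused:", sorted(imported - used))
```

Run as `python find_unused.py dataset.py`; on the pre-commit file it would have flagged `load_and_preprocess_dcm_uint16` and `EDCXRSubjectRecords`, assuming neither name was used elsewhere in the module.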
generation_config.json CHANGED

```diff
@@ -3,5 +3,5 @@
   "bos_token_id": 1,
   "eos_token_id": 2,
   "pad_token_id": 4,
-  "transformers_version": "4.
+  "transformers_version": "4.42.3"
 }
```
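The same version pin is applied here; the special-token ids are untouched, and note that the pad id (4) is distinct from the eos id (2). A minimal read-back, assuming the repo id:

```python
from transformers import GenerationConfig

# Repo id is an assumption for illustration.
gen = GenerationConfig.from_pretrained("aehrc/cxrmate-ed")
assert gen.bos_token_id == 1
assert gen.eos_token_id == 2
assert gen.pad_token_id == 4
```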
modelling_cxrmate_ed.py CHANGED

```diff
@@ -6,7 +6,6 @@ from typing import Optional, Tuple, Union
 
 import duckdb
 import pandas as pd
-import streamlit as st
 import torch
 import transformers
 from torch.nn import CrossEntropyLoss
```
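Dropping `import streamlit as st` removes a UI-only dependency from the modelling code; while it was present, anyone loading the model with `trust_remote_code=True` needed Streamlit installed. Where a demo helper genuinely needs it, a common alternative (a sketch, not this repo's approach) is a lazy import:

```python
def show_report(report: str) -> None:
    """Render a generated report in a demo UI (hypothetical helper)."""
    import streamlit as st  # imported only when the demo actually runs
    st.markdown(report)
```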