Add new SentenceTransformer model
- 1_Pooling/config.json +10 -0
- README.md +467 -0
- config.json +26 -0
- config_sentence_transformers.json +10 -0
- model.safetensors +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +62 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
{
    "word_embedding_dimension": 768,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
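Only mean pooling is enabled in this config. As an illustrative sketch (not the library's exact implementation), the selected operation averages token embeddings over non-padding positions:

```python
# Sketch of the mean pooling this config selects (illustration only).
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 768); attention_mask: (batch, seq_len)
    mask = attention_mask.unsqueeze(-1).float()     # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)   # sum over real tokens only
    counts = mask.sum(dim=1).clamp(min=1e-9)        # number of real tokens per row
    return summed / counts                          # (batch, 768)
```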
README.md
ADDED
@@ -0,0 +1,467 @@
---
base_model: thenlper/gte-base
library_name: sentence-transformers
metrics:
- pearson_cosine
- spearman_cosine
- pearson_manhattan
- spearman_manhattan
- pearson_euclidean
- spearman_euclidean
- pearson_dot
- spearman_dot
- pearson_max
- spearman_max
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:206874
- loss:ContrastiveLoss
widget:
- source_sentence: 'Cardiac silhouette size is top normal. Aorta is tortuous and demonstrates
    mild atherosclerotic calcifications diffusely. Hilar contours are normal. Pulmonary
    vasculature is normal. Lungs are clear. No pleural effusion or pneumothorax is
    present. No acute osseous abnormality is detected. '
  sentences:
  - 'No acute cardiopulmonary process. '
  - 'No acute cardiopulmonary abnormality. '
  - 'Normal chest radiographs. '
- source_sentence: 'The lungs are mildly hyperexpanded but clear. No pleural effusion
    or pneumothorax is seen. The cardiac and mediastinal silhouettes are unremarkable. '
  sentences:
  - 'Findings worrisome for early/mild left lower lobe pneumonia. '
  - 'No acute cardiopulmonary process. The mediastinum is not widened. '
  - 'No radiographic evidence of acute cardiopulmonary disease. '
- source_sentence: 'Lung volumes are slightly low. The cardiomediastinal silhouette
    and pulmonary vasculature are similar to the prior examination, and unremarkable,
    accounting for low lung volumes. Midline sternal wires are intact and well aligned.
    Mediastinal clips and anastomotic markers are noted. The lungs are clear. There
    is no pleural effusion or pneumothorax. Bilateral shoulder prostheses are partially
    imaged. '
  sentences:
  - 'No acute cardiopulmonary process. '
  - 'No acute intrathoracic abnormality. '
  - 'Pulmonary edema, increasing pleural effusions, known mass in the right lower
    lung. '
- source_sentence: 'The left hemithorax remains opacified. The right lung is now
    clear. The right mediastinal silhouette is unchanged. An endotracheal tube, feeding
    tube, and right internal jugular catheter remain in place. '
  sentences:
  - 'The right lung now appears clear. No other significant change. '
  - 'No acute cardiopulmonary abnormality. '
  - 'Chest findings within normal limits, no secondary metastases suspicious lesions
    identified. '
- source_sentence: 'The patient is status post coronary artery bypass graft surgery.
    The heart is mildly enlarged. There is a large hiatal hernia with an air-fluid
    level. Otherwise, the mediastinal and hilar contours are unremarkable. The lungs
    appear clear. The chest is hyperinflated. There is no pleural effusion or pneumothorax.
    Bony structures are unremarkable. '
  sentences:
  - '1. Left apical pneumothorax still small, but considerably larger. Left base pneumothorax
    also slightly larger. 2. Minimal lucency adjacent to the aortic knob may also
    represent part of the left lung pneumothorax. Attention to this area on followup
    films to exclude any mediastinal air is requested. 3. Extensive subcutaneous emphysema,
    equivocally slightly greater than on the prior film. 4. Minimal interval change
    in position of the left chest tube. 5. Right pneumothorax also increased, still
    small in width, but now seen not only at the right lung apex, but also along the
    right lateral chest wall and at the right costophrenic angle in the adjoining
    lung base. '
  - 'No evidence of acute disease. Normal cardiac size. '
  - 'No evidence of acute disease. Hyperinflation. Large hiatal hernia. Status post
    coronary artery bypass graft surgery. '
model-index:
- name: SentenceTransformer based on thenlper/gte-base
  results:
  - task:
      type: semantic-similarity
      name: Semantic Similarity
    dataset:
      name: validation
      type: validation
    metrics:
    - type: pearson_cosine
      value: 0.8022517557853334
      name: Pearson Cosine
    - type: spearman_cosine
      value: 0.810529949353046
      name: Spearman Cosine
    - type: pearson_manhattan
      value: 0.8243043367211444
      name: Pearson Manhattan
    - type: spearman_manhattan
      value: 0.8105359053829688
      name: Spearman Manhattan
    - type: pearson_euclidean
      value: 0.824484835649088
      name: Pearson Euclidean
    - type: spearman_euclidean
      value: 0.8105299161732425
      name: Spearman Euclidean
    - type: pearson_dot
      value: 0.802251755767147
      name: Pearson Dot
    - type: spearman_dot
      value: 0.8105299280214241
      name: Spearman Dot
    - type: pearson_max
      value: 0.824484835649088
      name: Pearson Max
    - type: spearman_max
      value: 0.8105359053829688
      name: Spearman Max
---

# SentenceTransformer based on thenlper/gte-base

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [thenlper/gte-base](https://huggingface.co/thenlper/gte-base). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [thenlper/gte-base](https://huggingface.co/thenlper/gte-base) <!-- at revision 5e95d41db6721e7cbd5006e99c7508f0083223d6 -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
```
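
For reference, a hedged sketch of the equivalent manual forward pass with `transformers` (illustration only; `SentenceTransformer` performs these steps internally):

```python
# Sketch: what the three modules do, expressed with transformers directly.
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("hyojuuun/gte_base_MIMICCXR_FT")
bert = AutoModel.from_pretrained("hyojuuun/gte_base_MIMICCXR_FT")

inputs = tokenizer(["The lungs are clear. "], padding=True, truncation=True,
                   max_length=512, return_tensors="pt")
with torch.no_grad():
    token_embeddings = bert(**inputs).last_hidden_state              # (0) Transformer
mask = inputs["attention_mask"].unsqueeze(-1).float()
sentence_embedding = (token_embeddings * mask).sum(1) / mask.sum(1)  # (1) mean Pooling
sentence_embedding = F.normalize(sentence_embedding, p=2, dim=1)     # (2) Normalize
```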

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("hyojuuun/gte_base_MIMICCXR_FT")
# Run inference
sentences = [
    'The patient is status post coronary artery bypass graft surgery. The heart is mildly enlarged. There is a large hiatal hernia with an air-fluid level. Otherwise, the mediastinal and hilar contours are unremarkable. The lungs appear clear. The chest is hyperinflated. There is no pleural effusion or pneumothorax. Bony structures are unremarkable. ',
    'No evidence of acute disease. Hyperinflation. Large hiatal hernia. Status post coronary artery bypass graft surgery. ',
    '1. Left apical pneumothorax still small, but considerably larger. Left base pneumothorax also slightly larger. 2. Minimal lucency adjacent to the aortic knob may also represent part of the left lung pneumothorax. Attention to this area on followup films to exclude any mediastinal air is requested. 3. Extensive subcutaneous emphysema, equivocally slightly greater than on the prior film. 4. Minimal interval change in position of the left chest tube. 5. Right pneumothorax also increased, still small in width, but now seen not only at the right lung apex, but also along the right lateral chest wall and at the right costophrenic angle in the adjoining lung base. ',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
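
As a further hypothetical example, the embeddings can rank candidate impression statements against a findings section (sentences reused from the widget samples above; variable names are illustrative):

```python
# Hypothetical retrieval example: pick the impression closest to a findings section.
findings = (
    "The lungs are mildly hyperexpanded but clear. No pleural effusion "
    "or pneumothorax is seen. The cardiac and mediastinal silhouettes are unremarkable. "
)
candidates = [
    "Findings worrisome for early/mild left lower lobe pneumonia. ",
    "No acute cardiopulmonary process. The mediastinum is not widened. ",
    "No radiographic evidence of acute cardiopulmonary disease. ",
]
scores = model.similarity(model.encode([findings]), model.encode(candidates))[0]
print(candidates[int(scores.argmax())])  # highest-cosine candidate
```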

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

## Evaluation

### Metrics

#### Semantic Similarity
* Dataset: `validation`
* Evaluated with [<code>EmbeddingSimilarityEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)

| Metric             | Value      |
|:-------------------|:-----------|
| pearson_cosine     | 0.8023     |
| spearman_cosine    | 0.8105     |
| pearson_manhattan  | 0.8243     |
| spearman_manhattan | 0.8105     |
| pearson_euclidean  | 0.8245     |
| spearman_euclidean | 0.8105     |
| pearson_dot        | 0.8023     |
| spearman_dot       | 0.8105     |
| pearson_max        | 0.8245     |
| **spearman_max**   | **0.8105** |
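
The table above can be reproduced by running the evaluator against the model; a minimal sketch, assuming `model` is loaded as in the usage example (the two pairs below are illustrative, not the real validation split):

```python
# Sketch of running the evaluator on illustrative findings/impression pairs.
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator

sentences1 = [
    "The lungs are clear. No pleural effusion or pneumothorax is seen. ",
    "An ill-defined opacity in the left lung has increased and can be early pneumonia. ",
]
sentences2 = [
    "No acute cardiopulmonary process. ",
    "No evidence of acute cardiopulmonary disease. ",
]
labels = [1.0, 0.0]  # gold similarity scores in [0, 1]

evaluator = EmbeddingSimilarityEvaluator(sentences1, sentences2, labels, name="validation")
results = evaluator(model)  # dict of Pearson/Spearman metrics, e.g. validation_spearman_cosine
```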

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 206,874 training samples
* Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
  |         | sentence_0                                                                          | sentence_1                                                                          | label                                                          |
  |:--------|:------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------|:----------------------------------------------------------------|
  | type    | string                                                                              | string                                                                              | float                                                          |
  | details | <ul><li>min: 3 tokens</li><li>mean: 78.31 tokens</li><li>max: 324 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 26.68 tokens</li><li>max: 165 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.5</li><li>max: 1.0</li></ul> |
* Samples:
  | sentence_0                                                                                                                                                                                                                                                                                                                 | sentence_1                                                                        | label            |
  |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:------------------|
  | <code>The lung volumes are low which accentuates the linear and interstitial opacities. An ill-defined opacity in the left lung in the third/fourth interspace has increased since the prior can be early pneumonia. No pneumothorax. Mild to moderate gastric and small bowel distension partially visualized. </code> | <code>No evidence of acute cardiopulmonary disease. </code>                       | <code>0.0</code> |
  | <code>PA and lateral views of the chest were provided demonstrating no focal consolidation, effusion or pneumothorax. The cardiomediastinal silhouette is normal. Bony structures are intact. No free air below the right hemidiaphragm. </code>                                                                          | <code>No acute intrathoracic process. </code>                                     | <code>1.0</code> |
  | <code>Previously seen right-sided PICC is no longer seen. Enlargement of the cardiomediastinal silhouette is grossly stable. There are low lung volumes, which accentuate the bronchovascular markings. No focal consolidation is seen. There is no pleural effusion or pneumothorax. </code>                             | <code>Low lung volumes but no focal consolidation to suggest pneumonia. </code>   | <code>1.0</code> |
* Loss: [<code>ContrastiveLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
  ```json
  {
      "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
      "margin": 0.5,
      "size_average": true
  }
  ```
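
For a pair with label y ∈ {0, 1} and cosine distance d between its two embeddings, this loss (Hadsell et al., 2006) is y·d² + (1−y)·max(0, margin−d)²: similar pairs are pulled together, dissimilar pairs pushed at least `margin` apart. As a hedged sketch (illustrative, not the library's exact implementation):

```python
# Sketch of the contrastive loss configured above.
import torch
import torch.nn.functional as F

def contrastive_loss(emb_a, emb_b, labels, margin=0.5):
    # d = cosine distance between the two embeddings of each pair
    d = 1 - F.cosine_similarity(emb_a, emb_b)
    losses = labels * d.pow(2) + (1 - labels) * F.relu(margin - d).pow(2)
    return losses.mean()  # "size_average": true
```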

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: steps
- `per_device_train_batch_size`: 96
- `per_device_eval_batch_size`: 96
- `multi_dataset_batch_sampler`: round_robin

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: steps
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 96
- `per_device_eval_batch_size`: 96
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 5e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1
- `num_train_epochs`: 3
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.0
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `eval_use_gather_object`: False
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: round_robin

</details>
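
A hedged sketch of a training setup consistent with the key hyperparameters above (the two-row dataset is illustrative; the real data is 206,874 MIMIC-CXR findings/impression pairs, not reproduced here):

```python
# Sketch of a SentenceTransformerTrainer run matching the card's setup.
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import ContrastiveLoss

model = SentenceTransformer("thenlper/gte-base")
train_dataset = Dataset.from_dict({
    "sentence_0": ["The lungs are clear. ", "Low lung volumes. "],
    "sentence_1": ["No acute cardiopulmonary process. ", "Findings worrisome for pneumonia. "],
    "label": [1.0, 0.0],
})
args = SentenceTransformerTrainingArguments(
    output_dir="gte_base_MIMICCXR_FT",
    num_train_epochs=3,
    per_device_train_batch_size=96,
    # eval_strategy="steps" was used with a validation split, omitted here
)
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    loss=ContrastiveLoss(model, margin=0.5),
)
trainer.train()
```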

### Training Logs
| Epoch  | Step | Training Loss | validation_spearman_max |
|:------:|:----:|:-------------:|:-----------------------:|
| 0.0464 | 100  | -             | 0.6178                  |
| 0.0928 | 200  | -             | 0.6904                  |
| 0.1392 | 300  | -             | 0.7290                  |
| 0.1856 | 400  | -             | 0.7596                  |
| 0.2320 | 500  | 0.0191        | 0.7715                  |
| 0.2784 | 600  | -             | 0.7783                  |
| 0.3248 | 700  | -             | 0.7851                  |
| 0.3712 | 800  | -             | 0.7885                  |
| 0.4176 | 900  | -             | 0.7942                  |
| 0.4640 | 1000 | 0.0118        | 0.7965                  |
| 0.5104 | 1100 | -             | 0.8061                  |
| 0.5568 | 1200 | -             | 0.8035                  |
| 0.6032 | 1300 | -             | 0.8082                  |
| 0.6497 | 1400 | -             | 0.8105                  |


### Framework Versions
- Python: 3.10.12
- Sentence Transformers: 3.2.0
- Transformers: 4.44.2
- PyTorch: 2.4.1+cu121
- Accelerate: 0.34.2
- Datasets: 3.0.1
- Tokenizers: 0.19.1

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### ContrastiveLoss
```bibtex
@inproceedings{hadsell2006dimensionality,
  author={Hadsell, R. and Chopra, S. and LeCun, Y.},
  booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
  title={Dimensionality Reduction by Learning an Invariant Mapping},
  year={2006},
  volume={2},
  number={},
  pages={1735-1742},
  doi={10.1109/CVPR.2006.100}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
@@ -0,0 +1,26 @@
{
  "_name_or_path": "/content/drive/MyDrive/\u1109\u1169\u110f\u1162\u11b8\u1103\u1175/LangGraph/Embedding_FT/gte_base_MIMICCXR_FT",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.44.2",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
{
  "__version__": {
    "sentence_transformers": "3.2.0",
    "transformers": "4.44.2",
    "pytorch": "2.4.1+cu121"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": null
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bbbb1d34e38828c654132678facc1d60d26129cf048614d0dfe46071daab8aef
size 437951328
modules.json
ADDED
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,62 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "max_length": 128,
  "model_max_length": 512,
  "pad_to_multiple_of": null,
  "pad_token": "[PAD]",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "[SEP]",
  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
}
vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff