Update README.md
README.md (changed)
@@ -360,9 +360,8 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModel.from_pretrained(model_name, trust_remote_code=True, attn_implementation="flash_attention_2", torch_dtype=torch.float16).to("cuda")
 model.eval()
 
-#
-#
-def mean_pooling(hidden,attention_mask):
+# As we scale the final hidden states in `model.forward`, mean pooling here actually works as weighted mean pooling
+def mean_pooling(hidden, attention_mask):
     s = torch.sum(hidden * attention_mask.unsqueeze(-1).float(), dim=1)
     d = attention_mask.sum(dim=1, keepdim=True).float()
     reps = s / d
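To make the new comment concrete, here is a minimal self-contained sketch (not part of the diff: the `return` statement, the toy shapes, and the per-position `weights` are illustrative assumptions) showing that mean pooling over pre-scaled hidden states is the same as an explicit weighted combination of the unscaled states:

```python
import torch

# Sketch of the pooling function from the diff, with an assumed `return`
# (the hunk above ends before the function body does).
def mean_pooling(hidden, attention_mask):
    # Zero out padding positions, then sum over the sequence dimension
    s = torch.sum(hidden * attention_mask.unsqueeze(-1).float(), dim=1)
    # Number of real (non-padding) tokens per sequence
    d = attention_mask.sum(dim=1, keepdim=True).float()
    reps = s / d
    return reps

# Toy tensors, no model download needed
hidden = torch.randn(2, 5, 8)                        # (batch, seq_len, dim)
mask = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 1]])               # 1 = real token, 0 = padding
weights = torch.arange(1, 6).float().view(1, 5, 1)   # hypothetical per-position scaling

# Mean pooling of pre-scaled states (what happens when `model.forward`
# scales the hidden states before pooling) ...
pooled = mean_pooling(hidden * weights, mask)

# ... equals an explicit weighted combination of the unscaled states
w = (weights * mask.unsqueeze(-1)).squeeze(-1)       # (batch, seq_len), padding zeroed
expected = (hidden * w.unsqueeze(-1)).sum(dim=1) / mask.sum(dim=1, keepdim=True)
print(torch.allclose(pooled, expected))              # True
```

Intuitively, if `model.forward` multiplies the hidden state at position i by a weight w_i, then summing the scaled states and dividing by the plain token count n yields a combination with weights w_i / n, which is why plain mean pooling here behaves as weighted mean pooling.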