typo fix for get_probabilities (#4)
Commit e705b8a7651724b33bd4eb227a6ff4697aeaba82
Co-authored-by: Manish Nagireddy <mnagired@users.noreply.huggingface.co>
README.md CHANGED
@@ -76,7 +76,7 @@ def parse_output(output, input_len):
     list_index_logprobs_i = [torch.topk(token_i, k=nlogprobs, largest=True, sorted=True)
                              for token_i in list(output.scores)[:-1]]
     if list_index_logprobs_i is not None:
-        prob =
+        prob = get_probabilities(list_index_logprobs_i)
         prob_of_risk = prob[1]
 
     res = tokenizer.decode(output.sequences[:,input_len:][0],skip_special_tokens=True).strip()
@@ -89,7 +89,7 @@ def parse_output(output, input_len):
 
     return label, prob_of_risk.item()
 
-def
+def get_probabilities(logprobs):
     safe_token_prob = 1e-50
     unsafe_token_prob = 1e-50
     for gen_token_i in logprobs:
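For context, the hunks above only show the first lines of get_probabilities, so below is a minimal sketch of a function that would fit this call pattern (a list of torch.topk results in, a pair of probabilities out, with prob[1] read as the probability of risk). The safe_token_id / unsafe_token_id parameters and the softmax-over-top-k normalization are illustrative assumptions, not the README's actual implementation.

import torch

# Hedged sketch only: the diff shows just the initializations and the loop header.
# Token IDs and the normalization scheme below are assumptions for illustration.
def get_probabilities_sketch(logprobs, safe_token_id=0, unsafe_token_id=1):
    # logprobs: list of torch.topk results, one (values, indices) pair per generated token
    safe_token_prob = 1e-50
    unsafe_token_prob = 1e-50
    for gen_token_i in logprobs:
        values, indices = gen_token_i              # each of shape (batch, k)
        probs = torch.softmax(values[0], dim=-1)   # turn top-k scores into probabilities
        for p, idx in zip(probs, indices[0]):
            if idx.item() == safe_token_id:
                safe_token_prob += p.item()
            elif idx.item() == unsafe_token_id:
                unsafe_token_prob += p.item()
    # Normalize so that index 1 can be read as the probability of risk,
    # matching prob_of_risk = prob[1] at the call site in parse_output.
    total = safe_token_prob + unsafe_token_prob
    return torch.tensor([safe_token_prob / total, unsafe_token_prob / total])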