Commit aa47040
Parent(s): b9a198e
Update app.py
app.py CHANGED

@@ -43,7 +43,7 @@ def chunk_text_to_window_size_and_predict_proba(input_ids, attention_mask, total
     window_length = 510
 
     loop = True
-
+    count = 1
     while loop:
         end = start + window_length
         # If the end index exceeds total length, set the flag to False and adjust the end index
@@ -68,6 +68,8 @@ def chunk_text_to_window_size_and_predict_proba(input_ids, attention_mask, total
         outputs = model(**input_dict)
 
         decoded = tokenizer.decode(input_ids_chunk)
+        print("Loop Count:" + count)
+        count = count + 1
         print("########:", decoded , ":##############")
 
         probabilities = torch.nn.functional.softmax(outputs[0], dim = -1)
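One bug worth flagging in the added lines: "Loop Count:" + count concatenates a str with an int, so the new print raises a TypeError on the first loop iteration. Below is a minimal, self-contained sketch of the counter and windowing logic with that fixed; the index bookkeeping outside the shown hunks (how start advances, where total_len comes from) is assumed for illustration and may differ from app.py.

# Sketch of the windowing loop with the print fixed. The concrete values
# below are assumed examples, not taken from app.py.
total_len = 1200        # assumed: length of the tokenized input
window_length = 510     # as in the diff (likely 512 minus the two special tokens in BERT-style models)

start = 0
loop = True
count = 1
while loop:
    end = start + window_length
    # If the end index exceeds total length, set the flag to False and adjust the end index
    if end >= total_len:
        loop = False
        end = total_len
    print("Loop Count:", count)   # pass count as a separate argument instead of concatenating with +
    count = count + 1
    start = end                   # assumed advance; the real loop may step or overlap differently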