made progress bar optional
PREVIOUSLY:
progress bar always showed
NOW:
passing the 'verbose=False' argument disables the progress bar
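For context, a minimal usage sketch of how a caller would take advantage of this change. This is not part of the commit; the Hub id "nbansal/semncg" and the exact compute keyword names are assumptions inferred from the docstring in the diff below, and the example inputs are made up.

# Hedged sketch: computing SemNCG with the progress bar disabled.
import evaluate

semncg = evaluate.load("nbansal/semncg")  # assumed Hub id for this metric

predictions = ["The cat sat on the mat."]
references = ["A cat was sitting on the mat."]
documents = ["The cat sat on the mat. It looked very comfortable there."]

result = semncg.compute(
    predictions=predictions,
    references=references,
    documents=documents,
    k=3,
    verbose=False,  # with this commit, False suppresses the tqdm progress bar
)
print(result)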
semncg.py CHANGED
@@ -415,7 +415,7 @@ class SemNCG(evaluate.Metric):
                 `tokenize_sentences`=False -> references: List[List[str]]
             k (int, optional): The rank threshold used for evaluating gains (typically top-k sentences). Default is 3.
             gpu (DEVICE_TYPE, optional): Whether to use GPU for computation. Default is False.
-            verbose (bool, optional): Whether to print verbose logs. Default is False.
+            verbose (bool, optional): Whether to print verbose logs and use a progress bar. Default is False.
             batch_size (int, optional): The batch size for encoding sentences. Default is 32.
             tokenize_sentences (bool, optional): Whether to tokenize sentences. If True, sentences are tokenized before
                 processing. Default is True.
@@ -484,7 +484,10 @@ class SemNCG(evaluate.Metric):
         iterable_obj = zip(predictions, references, documents)
 
         out = []
-        for idx, (pred, ref, doc) in tqdm(enumerate(iterable_obj), total=N):
+        for idx, (pred, ref, doc) in tqdm(
+                enumerate(iterable_obj),
+                total=N,
+                disable=not verbose):
 
             if not pre_compute_embeddings: # Compute embeddings
                 ref_sentences = tokenize_and_prep_document(ref, tokenize_sentences)
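The added lines use tqdm's standard `disable` flag, so the loop body itself is unchanged and the bar is simply skipped when verbose is False. A self-contained illustration of the same pattern (not code from this repository):

# Standalone example of tqdm's disable flag, the pattern introduced above.
from tqdm import tqdm

def process(items, verbose=False):
    results = []
    # disable=not verbose: the bar renders only when verbose is True.
    for idx, item in tqdm(enumerate(items), total=len(items), disable=not verbose):
        results.append((idx, item.upper()))
    return results

print(process(["a", "b", "c"], verbose=False))  # no progress bar
print(process(["a", "b", "c"], verbose=True))   # progress bar shown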