patrickvonplaten committed
Commit
48c8532
1 Parent(s): 44fabc2
Files changed (1)
eval.py +0 -127
eval.py DELETED
@@ -1,127 +0,0 @@
#!/usr/bin/env python3
from datasets import load_dataset, load_metric, Audio, Dataset
from transformers import pipeline, AutoFeatureExtractor
import re
import argparse
import unicodedata
from typing import Dict


def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
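    # e.g. dataset "mozilla-foundation/common_voice_7_0" with config "en" and
    # split "test" gives "mozilla-foundation_common_voice_7_0_en_test" (illustrative IDs)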

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
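    # both compute() calls return a single float, the error rate as a fraction
    # (e.g. 0.25 for a 25% error rate)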

    # print & log results
    result_str = (
        f"WER: {wer_result}\n"
        f"CER: {cer_result}"
    )
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in a text file; possibly useful for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:

            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}\n")
                t.write(batch["target"] + "\n")

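            # Dataset.map is used purely for its side effect here: writing one
            # indexed prediction/target pair per example to the two log files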
            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""

    # IMPORTANT: this should correspond to the characters that were ignored during training
    chars_to_ignore_regex = r'[,?.!\-\;\:\"“%‘”�—’…–]'

    text = text.lower()
    # normalize non-standard (stylized) unicode characters
    text = unicodedata.normalize("NFKC", text)
    # remove punctuation
    text = re.sub(chars_to_ignore_regex, "", text)

    # collapse all whitespace (newlines, tabs, repeated spaces) into single spaces
    text = " ".join(text.split())
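    # e.g. normalize_text("Hello, World!") returns "hello world"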

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first ten examples; remove this line to evaluate the full split
    dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio to the sampling rate the model was trained on
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
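    # casting to Audio(sampling_rate=...) makes 🤗 Datasets resample each file
    # on the fly when batch["audio"]["array"] is accessed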

    # load eval pipeline
    asr = pipeline("automatic-speech-recognition", model=args.model_id)

    # map function to decode audio
    def map_to_pred(batch):
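        # chunk_length_s / stride_length_s split long audio into overlapping
        # windows so that files longer than the model's maximum input length
        # can still be transcribed; both default to None (no chunking)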
        prediction = asr(batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log results
    # do not change the function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers."
    )
    parser.add_argument(
        "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id` on. Should be loadable with 🤗 Datasets."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice."
    )
    parser.add_argument(
        "--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`."
    )
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to None. For long audio files a good value is 5.0 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks in seconds. Defaults to None. For long audio files a good value is 1.0 seconds."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If set, write predictions and targets to log files for analysis."
    )
    args = parser.parse_args()

    main(args)
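For reference, a script of this shape would typically be invoked along the following lines; the model ID is a placeholder and the dataset/config/split values simply follow the Common Voice example from the argument help texts, not anything recorded in this commit:

python eval.py --model_id <your-model-id> --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs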