Additional metadata fields!

Files changed:
- app.py +80 -20
- requirements.txt +2 -1
app.py
CHANGED
@@ -153,8 +153,10 @@ def main():
    # EVAL RESULTS
    ############################
    st.markdown("### Evaluation results")
-    st.markdown(
-        ...
+    st.markdown(
+        "To edit the metrics, you can either use the YAML editor below, or add new metrics using the handy "
+        "form under it."
+    )
    if "results" not in metadata["model-index"][0]:
        metadata["model-index"][0]["results"] = []

@@ -166,35 +168,80 @@ def main():
    results_yaml = st_ace(value=results_yaml, language="yaml")
    metadata["model-index"][0]["results"] = try_parse_yaml(results_yaml)

+    dataset_path_kwargs = {}
+    dataset_name_kwargs = {}
+    if (
+        len(metadata["model-index"][0]["results"]) > 0
+        and "dataset" in metadata["model-index"][0]["results"]
+    ):
+        if "type" in metadata["model-index"][0]["results"]["dataset"]:
+            dataset_path_kwargs["value"] = metadata["model-index"][0]["results"][
+                "dataset"
+            ]["type"]
+        if "name" in metadata["model-index"][0]["results"]["dataset"]:
+            dataset_name_kwargs["value"] = metadata["model-index"][0]["results"][
+                "dataset"
+            ]["type"]
+
    with st.form(key="eval_form"):
-        dataset_name = st.text_input(
-            label="Full name of the dataset", placeholder="Common Voice 8.0"
-        )
        dataset_path = st.text_input(
-            label="Dataset path / id",
+            label="Dataset path / id",
+            placeholder="mozilla-foundation/common_voice_8_0",
+            **dataset_path_kwargs,
+        )
+        dataset_name = st.text_input(
+            label="A pretty name for the dataset. Examples: 'Common Voice 9.0 (French)', 'LibriSpeech (clean)'",
+            placeholder="Common Voice 8.0 (French)",
+            **dataset_name_kwargs,
        )
        dataset_config = st.text_input(
-            label="Dataset ...
+            label="Dataset configuration. Examples: clean, other, en, pt-BR",
            placeholder="en",
        )
-        ...
+        dataset_language = st.text_input(
+            label="Dataset language. Examples: en, pt-BR",
+            value=languages[0],
+            placeholder="en",
+        )
+        dataset_split = st.text_input(
+            label="Dataset split. Examples: test, validation",
+            value="test",
+            placeholder="test",
+        )
        metric2name = {"wer": "Word Error Rate", "cer": "Character Error Rate"}
        metric_type = st.selectbox(
            label="Metric",
            options=["wer", "cer"],
            format_func=lambda key: metric2name[key],
        )
+        metric_name = st.text_input(
+            label="A pretty name for the metric. Example: Test WER (+LM)",
+            placeholder="Test WER",
+            value="Test WER",
+        )
        metric_value = st.text_input(
-            label="Metric value ...
+            label="Metric value. Use values in range 0.0 to 100.0.",
            placeholder="12.34",
        )
-        try:
-            metric_value = float(metric_value)
-        except ValueError:
-            ...
+        # try:
+        #     metric_value = float(metric_value)
+        # except ValueError:
+        #     st.error(
+        #         f"Couldn't parse `{metric_value}`. Make sure it's a number from 0.0 to 100.0"
+        #     )

-        submitted = st.form_submit_button("...
-        if ...
+        submitted = st.form_submit_button("Add metric")
+        if (
+            submitted
+            and dataset_name
+            and dataset_path
+            and dataset_config
+            and dataset_split
+            and dataset_language
+            and metric_name
+            and metric_type
+            and metric_value
+        ):
            metric = {
                "name": metric_name,
                "type": metric_type,
@@ -206,7 +253,10 @@ def main():
                existing_dataset = existing_result["dataset"]
                if (
                    existing_dataset["type"] == dataset_path
-                    and ...
+                    and "config" in existing_dataset
+                    and existing_dataset["config"] == dataset_config
+                    and "split" in existing_dataset
+                    and existing_dataset["split"] == dataset_split
                ):
                    if "metrics" not in existing_result:
                        existing_result["metrics"] = []
@@ -223,7 +273,9 @@ def main():
                    "dataset": {
                        "name": dataset_name,
                        "type": dataset_path,
-                        "...
+                        "config": dataset_config,
+                        "split": dataset_split,
+                        "args": {"language": dataset_language},
                    },
                    "metrics": [metric],
                }
@@ -238,8 +290,14 @@ def main():
            )
            results_yaml = st_ace(value=results_yaml, language="yaml")
            metadata["model-index"][0]["results"] = try_parse_yaml(results_yaml)
-            st.success(
-                ...
+            st.success(
+                f"Added the metric for {dataset_path} - {dataset_config}! "
+                f"Check the result in the YAML editor above."
+            )
+        elif submitted:
+            st.error(
+                f"Make sure that you've filled the whole form before clicking 'Add metric'!"
+            )

    ############################
    # FINAL YAML
@@ -247,8 +305,10 @@ def main():
    st.markdown("## 3. Copy the generated metadata")
    st.markdown(
        "Copy the YAML from below and replace the metadata at the top of your model's README.md here: "
-        f"https://huggingface.co/{model_id}/...
+        f"https://huggingface.co/{model_id}/edit/main/README.md"
    )
+    st.markdown("For mor info on the metadata schema please refer to "
+        "https://raw.githubusercontent.com/huggingface/hub-docs/main/modelcard.md")

    new_yaml = yaml.dump(metadata, sort_keys=False, line_break="\n")
    st.markdown(f"```yaml\n---\n{new_yaml}---\n```")
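
Taken together, the new form fields build a single entry in metadata["model-index"][0]["results"]. A rough sketch of the shape of one appended entry, with illustrative values only; the metric's "value" key is assumed, since the hunk that completes the metric dict is not shown in this diff:

```python
# Illustrative sketch, not part of the diff above. All values are made-up
# examples; the "value" key on the metric is an assumption.
example_result = {
    "dataset": {
        "name": "Common Voice 8.0 (French)",            # dataset_name
        "type": "mozilla-foundation/common_voice_8_0",  # dataset_path
        "config": "fr",                                  # dataset_config
        "split": "test",                                 # dataset_split
        "args": {"language": "fr"},                      # dataset_language
    },
    "metrics": [
        {
            "name": "Test WER",  # metric_name
            "type": "wer",       # metric_type
            "value": "12.34",    # metric_value (key assumed)
        }
    ],
}
```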
requirements.txt
CHANGED
@@ -1,2 +1,3 @@
streamlit-tags
-streamlit-ace
+streamlit-ace
+streamlit==1.5
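
requirements.txt keeps streamlit-ace, which provides the st_ace widget used for the YAML editor above, and pins streamlit to 1.5. As a minimal sketch of the editor round-trip the app relies on, assuming try_parse_yaml is essentially a yaml.safe_load wrapper with error reporting (the real helper in app.py may differ):

```python
import streamlit as st
import yaml
from streamlit_ace import st_ace


def try_parse_yaml(text):
    # Assumed behaviour: parse the editor contents, report and fall back to [] on bad YAML.
    try:
        return yaml.safe_load(text) or []
    except yaml.YAMLError:
        st.error("Couldn't parse the YAML above. Please fix it in the editor.")
        return []


results = []  # starts empty, as in the app when no results exist yet
results_yaml = yaml.dump(results, sort_keys=False)
results_yaml = st_ace(value=results_yaml, language="yaml")  # editable YAML widget
results = try_parse_yaml(results_yaml)
```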