Rename `HuggingFaceH4` to `open-llm-leaderboard` org in modelcards (#14)
- Rename `HuggingFaceH4` to `open-llm-leaderboard` org in modelcards (74be950e8574ef8522c9e03e80ff886e89126bea)
- Update functions.py (b9e2a15895124b8c59957773cf17b5a3d256ef1b)
Co-authored-by: Lucain Pouget <Wauplin@users.noreply.huggingface.co>
- app.py +2 -2
- functions.py +3 -3
app.py CHANGED
```diff
@@ -27,11 +27,11 @@ def refresh(how_much=43200): # default to 12 hour
         refresh(600) # 10 minutes if any error happens
 
 gradio_title="🧐 Open LLM Leaderboard Results PR Opener"
-gradio_desc= """🎯 This tool's aim is to provide [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) results in the model card.
+gradio_desc= """🎯 This tool's aim is to provide [Open LLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) results in the model card.
 
 ## 💭 What Does This Tool Do:
 
-- This tool adds the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) result of your model at the end of your model card.
+- This tool adds the [Open LLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) result of your model at the end of your model card.
 
 - This tool also adds evaluation results as your model's metadata to showcase the evaluation results as a widget.
 
```
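For context, the hunk header shows `refresh(how_much=43200)` defaulting to a 12-hour cycle, with `refresh(600)` as a 10-minute retry when an error occurs. A minimal sketch of that refresh-with-fallback pattern, assuming a background thread and a hypothetical `load_leaderboard_data` helper; the Space's actual code recurses into `refresh(600)` instead of looping:

```python
import threading
import time

def load_leaderboard_data():
    """Hypothetical stand-in for whatever the Space reloads each cycle."""
    ...

def refresh(how_much: int = 43200):  # default to 12 hours
    while True:
        time.sleep(how_much)
        try:
            load_leaderboard_data()
            how_much = 43200  # back to the normal 12-hour cadence
        except Exception:
            how_much = 600  # 10 minutes if any error happens

# Run the loop in the background so it does not block the Gradio app.
threading.Thread(target=refresh, daemon=True).start()
```

The loop form keeps the cadence the comments describe while avoiding unbounded recursion.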
functions.py CHANGED
```diff
@@ -33,7 +33,7 @@ def get_details_url(repo):
 
 
 def get_query_url(repo):
-    return f"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query={repo}"
+    return f"https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query={repo}"
 
 
 def get_task_summary(results):
@@ -118,7 +118,7 @@ def get_eval_results(repo):
     md_writer.value_matrix = [["Avg.", results['Average ⬆️']]] + [[v["dataset_name"], v["metric_value"]] for v in task_summary.values()]
 
     text = f"""
-# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
 Detailed results can be found [here]({get_details_url(repo)})
 
 {md_writer.dumps()}
@@ -130,7 +130,7 @@ def get_edited_yaml_readme(repo, token: str | None):
     card = ModelCard.load(repo, token=token)
     results = search(df, repo)
 
-    common = {"task_type": 'text-generation', "task_name": 'Text Generation', "source_name": "Open LLM Leaderboard", "source_url": f"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query={repo}"}
+    common = {"task_type": 'text-generation', "task_name": 'Text Generation', "source_name": "Open LLM Leaderboard", "source_url": f"https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query={repo}"}
 
     tasks_results = get_task_summary(results)
 
```
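For context, the `common` dict updated in the last hunk mirrors the fields of `huggingface_hub`'s `EvalResult`, the metadata class behind the model-page evaluation widget the description mentions. A minimal sketch of how those shared fields might combine with one per-task score; the repo id, dataset, and value below are illustrative placeholders, not this Space's output:

```python
from huggingface_hub import EvalResult, ModelCardData

repo = "some-user/some-model"  # hypothetical repo id

# Shared fields, mirroring the `common` dict in functions.py.
common = {
    "task_type": "text-generation",
    "task_name": "Text Generation",
    "source_name": "Open LLM Leaderboard",
    "source_url": f"https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query={repo}",
}

# One per-task entry; dataset and score are placeholders.
result = EvalResult(
    dataset_type="ai2_arc",
    dataset_name="AI2 Reasoning Challenge (25-Shot)",
    metric_type="acc_norm",
    metric_value=61.5,
    **common,
)

card_data = ModelCardData(model_name=repo.split("/")[-1], eval_results=[result])
print(card_data.to_yaml())  # the `model-index` YAML block behind the widget
```

Running the sketch prints the YAML that would be merged into the model card's front matter, which is how the evaluation results show up as a widget.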