Fix m2t
Browse files- src/leaderboard_formatting.py +3 -0
- src/tasks_content.py +1 -4
src/leaderboard_formatting.py
CHANGED
@@ -70,6 +70,9 @@ def get_columns_per_task(task_id: str) -> List[str]:
|
|
70 |
return ["Model Name", "Context Size", "Dataset Name", "Dataset"] + metrics_per_task + ["Availability", "Submitted By", "Resources"]
|
71 |
if task_id == 'bug_localization':
|
72 |
return ["Model Name", "Availability", "Context Size", "Dataset"] + metrics_per_task + ["Submitted By", "Resources"]
|
|
|
|
|
|
|
73 |
return ["Model Name", "Context Size", "Availability"] + metrics_per_task + ["Submitted By", "Resources"]
|
74 |
|
75 |
|
|
|
70 |
return ["Model Name", "Context Size", "Dataset Name", "Dataset"] + metrics_per_task + ["Availability", "Submitted By", "Resources"]
|
71 |
if task_id == 'bug_localization':
|
72 |
return ["Model Name", "Availability", "Context Size", "Dataset"] + metrics_per_task + ["Submitted By", "Resources"]
|
73 |
+
|
74 |
+
if task_id == 'module_summarization':
|
75 |
+
return ["Model Name", "Context Size"] + metrics_per_task + ["Submitted By", "Resources"]
|
76 |
return ["Model Name", "Context Size", "Availability"] + metrics_per_task + ["Submitted By", "Resources"]
|
77 |
|
78 |
|
src/tasks_content.py
CHANGED
@@ -38,7 +38,7 @@ TASKS_DESCRIPTIONS = {
|
|
38 |
We use new metric for evaluation:
|
39 |
* [CompScore](https://github.com/JetBrains-Research/lca-baselines/tree/module2text)
|
40 |
|
41 |
-
For further details on the dataset and the baselines from 🏟️ Long Code Arena Team, refer to `module2text` folder in [our baselines repository](https://github.com/JetBrains-Research/lca-baselines)
|
42 |
""",
|
43 |
|
44 |
"library_usage": "cool description for Library Usage Examples Generation task",
|
@@ -76,7 +76,4 @@ def get_submission_text_files_for_task(task_pretty: Optional[str]) -> str:
|
|
76 |
if task_id == "commit_message_generation":
|
77 |
return f"""**{task_pretty} Instructions:**\n\n* Please, attach files in [JSONLines format](https://jsonlines.org/). For an example, check the predictions provided by 🏟️ Long Code Arena Team in 🤗 [JetBrains-Research/lca-results](https://huggingface.co/datasets/JetBrains-Research/lca-results/tree/main/commit_message_generation/predictions). Make sure to include `"prediction"` and `"reference"` fields for each example, the rest are optional."""
|
78 |
|
79 |
-
if task_id == "module_summarization":
|
80 |
-
return f"""**{task_pretty} Instructions:**\n\n* Please, attach files in [JSONLines format](https://jsonlines.org/). For an example, check the predictions provided by 🏟️ Long Code Arena Team in 🤗 [JetBrains-Research/lca-results](https://huggingface.co/datasets/JetBrains-Research/lca-results/tree/main/commit_message_generation/predictions). Make sure to include `"prediction"` and `"reference"` fields for each example, the rest are optional."""
|
81 |
-
|
82 |
return f"**{task_pretty} Instructions:**\n\n* 🚧 There are no instructions for the current task yet."
|
|
|
38 |
We use new metric for evaluation:
|
39 |
* [CompScore](https://github.com/JetBrains-Research/lca-baselines/tree/module2text)
|
40 |
|
41 |
+
For further details on the dataset and the baselines from 🏟️ Long Code Arena Team, refer to `module2text` folder in [our baselines repository](https://github.com/JetBrains-Research/lca-baselines).
|
42 |
""",
|
43 |
|
44 |
"library_usage": "cool description for Library Usage Examples Generation task",
|
|
|
76 |
if task_id == "commit_message_generation":
|
77 |
return f"""**{task_pretty} Instructions:**\n\n* Please, attach files in [JSONLines format](https://jsonlines.org/). For an example, check the predictions provided by 🏟️ Long Code Arena Team in 🤗 [JetBrains-Research/lca-results](https://huggingface.co/datasets/JetBrains-Research/lca-results/tree/main/commit_message_generation/predictions). Make sure to include `"prediction"` and `"reference"` fields for each example, the rest are optional."""
|
78 |
|
|
|
|
|
|
|
79 |
return f"**{task_pretty} Instructions:**\n\n* 🚧 There are no instructions for the current task yet."
|