Update src/about.py
Browse files- src/about.py +2 -1
src/about.py
CHANGED
@@ -31,8 +31,9 @@ The Fino1 Leaderboard evaluates the performance of various LLMs, including gener
|
|
31 |
# Which evaluations are you running? how can people reproduce what you have?
|
32 |
LLM_BENCHMARKS_TEXT = f"""
|
33 |
## How it works
|
34 |
-
We used the framework from https://github.com/
|
35 |
The evaluation method from https://github.com/yale-nlp/DocMath-Eval is used to evaluate the performance of all models.
|
|
|
36 |
|
37 |
"""
|
38 |
|
|
|
31 |
# Which evaluations are you running? how can people reproduce what you have?
|
32 |
LLM_BENCHMARKS_TEXT = f"""
|
33 |
## How it works
|
34 |
+
We used the framework from https://github.com/The-FinAI/FinBen to do the inference.
|
35 |
The evaluation method from https://github.com/yale-nlp/DocMath-Eval is used to evaluate the performance of all models.
|
36 |
+
For more details of the evaluation datasets, please check https://github.com/The-FinAI/Fino1.
|
37 |
|
38 |
"""
|
39 |
|