LeroyDyer committed on
Commit
9afe345
1 Parent(s): ada3e3a

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +107 -1
README.md CHANGED
@@ -25,4 +25,110 @@ Used as the Boss of Other Agents!
25
  Somehow the best at testing !!!
26
  others may contain more paradigms and even data ... but somehow this one is top at leaderboard testing !
27
 
28
- VERY GOOD MODEL !!!!! (HIGH SCORES) - 78.9 Average
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  Somehow the best at testing !!!
26
  others may contain more paradigms and even data ... but somehow this one is top at leaderboard testing !
27
 
28
+ VERY GOOD MODEL !!!!! (HIGH SCORES) - 78.9 Average
29
+
30
+
31
+
32
+ @misc{open-llm-leaderboard-v2,
33
+ author = {Clémentine Fourrier and Nathan Habib and Alina Lozovskaya and Konrad Szafer and Thomas Wolf},
34
+ title = {Open LLM Leaderboard v2},
35
+ year = {2024},
36
+ publisher = {Hugging Face},
37
+ howpublished = "\url{https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard}",
38
+ }
39
+
40
+ @software{eval-harness,
41
+ author = {Gao, Leo and
42
+ Tow, Jonathan and
43
+ Biderman, Stella and
44
+ Black, Sid and
45
+ DiPofi, Anthony and
46
+ Foster, Charles and
47
+ Golding, Laurence and
48
+ Hsu, Jeffrey and
49
+ McDonell, Kyle and
50
+ Muennighoff, Niklas and
51
+ Phang, Jason and
52
+ Reynolds, Laria and
53
+ Tang, Eric and
54
+ Thite, Anish and
55
+ Wang, Ben and
56
+ Wang, Kevin and
57
+ Zou, Andy},
58
+ title = {A framework for few-shot language model evaluation},
59
+ month = sep,
60
+ year = 2021,
61
+ publisher = {Zenodo},
62
+ version = {v0.0.1},
63
+ doi = {10.5281/zenodo.5371628},
64
+ url = {https://doi.org/10.5281/zenodo.5371628},
65
+ }
66
+
67
+ @misc{zhou2023instructionfollowingevaluationlargelanguage,
68
+ title={Instruction-Following Evaluation for Large Language Models},
69
+ author={Jeffrey Zhou and Tianjian Lu and Swaroop Mishra and Siddhartha Brahma and Sujoy Basu and Yi Luan and Denny Zhou and Le Hou},
70
+ year={2023},
71
+ eprint={2311.07911},
72
+ archivePrefix={arXiv},
73
+ primaryClass={cs.CL},
74
+ url={https://arxiv.org/abs/2311.07911},
75
+ }
76
+
77
+ @misc{suzgun2022challengingbigbenchtaskschainofthought,
78
+ title={Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them},
79
+ author={Mirac Suzgun and Nathan Scales and Nathanael Schärli and Sebastian Gehrmann and Yi Tay and Hyung Won Chung and Aakanksha Chowdhery and Quoc V. Le and Ed H. Chi and Denny Zhou and Jason Wei},
80
+ year={2022},
81
+ eprint={2210.09261},
82
+ archivePrefix={arXiv},
83
+ primaryClass={cs.CL},
84
+ url={https://arxiv.org/abs/2210.09261},
85
+ }
86
+
87
+ @misc{hendrycks2021measuringmathematicalproblemsolving,
88
+ title={Measuring Mathematical Problem Solving With the MATH Dataset},
89
+ author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
90
+ year={2021},
91
+ eprint={2103.03874},
92
+ archivePrefix={arXiv},
93
+ primaryClass={cs.LG},
94
+ url={https://arxiv.org/abs/2103.03874},
95
+ }
96
+
97
+ @misc{rein2023gpqagraduatelevelgoogleproofqa,
98
+ title={GPQA: A Graduate-Level Google-Proof Q\&A Benchmark},
99
+ author={David Rein and Betty Li Hou and Asa Cooper Stickland and Jackson Petty and Richard Yuanzhe Pang and Julien Dirani and Julian Michael and Samuel R. Bowman},
100
+ year={2023},
101
+ eprint={2311.12022},
102
+ archivePrefix={arXiv},
103
+ primaryClass={cs.AI},
104
+ url={https://arxiv.org/abs/2311.12022},
105
+ }
106
+
107
+ @misc{sprague2024musrtestinglimitschainofthought,
108
+ title={MuSR: Testing the Limits of Chain-of-thought with Multistep Soft Reasoning},
109
+ author={Zayne Sprague and Xi Ye and Kaj Bostrom and Swarat Chaudhuri and Greg Durrett},
110
+ year={2024},
111
+ eprint={2310.16049},
112
+ archivePrefix={arXiv},
113
+ primaryClass={cs.CL},
114
+ url={https://arxiv.org/abs/2310.16049},
115
+ }
116
+
117
+ @misc{wang2024mmluprorobustchallengingmultitask,
118
+ title={MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark},
119
+ author={Yubo Wang and Xueguang Ma and Ge Zhang and Yuansheng Ni and Abhranil Chandra and Shiguang Guo and Weiming Ren and Aaran Arulraj and Xuan He and Ziyan Jiang and Tianle Li and Max Ku and Kai Wang and Alex Zhuang and Rongqi Fan and Xiang Yue and Wenhu Chen},
120
+ year={2024},
121
+ eprint={2406.01574},
122
+ archivePrefix={arXiv},
123
+ primaryClass={cs.CL},
124
+ url={https://arxiv.org/abs/2406.01574},
125
+ }
126
+
127
+ @misc{open-llm-leaderboard-v1,
128
+ author = {Edward Beeching and Clémentine Fourrier and Nathan Habib and Sheon Han and Nathan Lambert and Nazneen Rajani and Omar Sanseviero and Lewis Tunstall and Thomas Wolf},
129
+ title = {Open LLM Leaderboard (2023-2024)},
130
+ year = {2023},
131
+ publisher = {Hugging Face},
132
+ howpublished = "\url{https://huggingface.co/spaces/open-llm-leaderboard-old/open_llm_leaderboard}"
133
+ }
134
+