Upload constants.py
constants.py +69 -0
ADDED
@@ -0,0 +1,69 @@
+# This .py file stores constants.
+
+DATA_DIR = "./data/data.json"
+
+
+
+MODEL_INFO = ["Model Name", "Language Model"]
+AVG_INFO = ["Avg. All"]
+ME_INFO = ["Method Name", "Language Model"]
+
+# Fixed KE information
+KE_Data_INFO = ["FewNERD", "FewRel", "InstructIE-en", "MAVEN", "WikiEvents"]
+
+KE_TASK_INFO = ["Avg. All", "FewNERD", "FewRel", "InstructIE-en", "MAVEN", "WikiEvents"]
+KE_CSV_DIR = "./ke_files/result-kgc.csv"
+DATA_COLUMN_NAMES = ["locality", "labels", "concept", "text"]
+KE_TABLE_INTRODUCTION = """In the table below, we summarize the performance of all models on each task. We use the F1 score (%) as the primary evaluation metric for each task.
+"""
+RESULT_COLUMN_NAMES = ["DataSet", "Metric", "Metric", "ICE", "AdaLoRA", "MEND", "ROME", "MEMIT", "FT-L", "FT"]
+DATA_STRUCT = """
+Datasets    ZsRE      Wikirecent    Wikicounterfact    WikiBio
+Train       10,000    570           1,455              592
+Test        1,230     1,266         885                1,392
+"""
+TITLE = """# KnowEdit: A Dataset for Knowledge Editing"""
+
+BACKGROUND = """
+Large Language Models (LLMs) have shown extraordinary capabilities in understanding and generating text that closely mirrors human communication. However, a primary limitation lies in the significant computational demands during training, arising from their extensive parameterization. There is increasing interest in efficient, lightweight methods for on-the-fly model modification. To this end, recent years have seen a burgeoning of knowledge editing techniques for LLMs, which aim to efficiently modify LLMs' behaviors within specific domains while preserving overall performance across various inputs.
+"""
+
+LEADERBORAD_INTRODUCTION = """
+This is the dataset for knowledge editing. It contains six tasks: ZsRE, Wiki<sub>recent</sub>, Wiki<sub>counterfact</sub>, WikiBio, ConvSent, and Sanitation. This repo covers the first four tasks; the data for ConvSent and Sanitation can be obtained from their original papers.
+"""
+DATA_SCHEMA = """{
+    "subject": xxx,
+    "target_new": xxx,
+    "prompt": xxx,
+    "portability": {
+        "Logical_Generalization": [],
+        ...
+    },
+    "locality": {
+        "Relation_Specificity": [],
+        ...
+    }
+}"""
+
+
+
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+CITATION_BUTTON_TEXT = r"""@article{tan2023evaluation,
+  title   = {Evaluation of ChatGPT as a question answering system for answering complex questions},
+  author  = {Yiming Tan and Dehai Min and Yu Li and Wenbo Li and Nan Hu and Yongrui Chen and Guilin Qi},
+  journal = {arXiv preprint arXiv:2303.07992},
+  year    = {2023}
+}
+@article{gui2023InstructIE,
+  author  = {Honghao Gui and Jintian Zhang and Hongbin Ye and Ningyu Zhang},
+  title   = {InstructIE: {A} Chinese Instruction-based Information Extraction Dataset},
+  journal = {arXiv preprint arXiv:2305.11527},
+  year    = {2023}
+}
+@article{yao2023edit,
+  author  = {Yunzhi Yao and Peng Wang and Bozhong Tian and Siyuan Cheng and Zhoubo Li and Shumin Deng and Huajun Chen and Ningyu Zhang},
+  title   = {Editing Large Language Models: Problems, Methods, and Opportunities},
+  journal = {arXiv preprint arXiv:2305.13172},
+  year    = {2023}
+}
+"""
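
For context, the TITLE/introduction strings and the citation-button constants suggest this file backs a results-browsing app (e.g. a Gradio leaderboard Space), though the surrounding app code is not part of this commit. Below is a minimal usage sketch under that assumption; the shapes of data.json and result-kgc.csv are likewise assumptions, not confirmed by the commit:

# Hypothetical usage sketch (not part of this commit). Assumes data.json
# holds a list of records shaped like DATA_SCHEMA and that the KE CSV's
# columns include the task names in KE_TASK_INFO.
import json

import pandas as pd

from constants import DATA_COLUMN_NAMES, DATA_DIR, KE_CSV_DIR, KE_TASK_INFO

# Load the KnowEdit records; each record is assumed to follow DATA_SCHEMA.
with open(DATA_DIR, encoding="utf-8") as f:
    records = json.load(f)

# Keep only the fields the data table displays.
data_df = pd.DataFrame([{k: r.get(k) for k in DATA_COLUMN_NAMES} for r in records])

# Load the KE result table and select the per-task score columns.
ke_df = pd.read_csv(KE_CSV_DIR)
print(ke_df[[c for c in KE_TASK_INFO if c in ke_df.columns]].head())

If the app is indeed a Gradio Space, TITLE, BACKGROUND, and LEADERBORAD_INTRODUCTION would typically be rendered with gr.Markdown, and CITATION_BUTTON_LABEL / CITATION_BUTTON_TEXT shown in a labeled gr.Textbox; that wiring, again, is not shown here.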