# This module stores constants for the KnowEdit leaderboard.

DATA_DIR = "./data/data.json"  # path to the raw leaderboard data (a JSON file, despite the name)
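
# Hypothetical helper (not in the original file): a minimal sketch of how the
# JSON file behind DATA_DIR might be loaded. The function name is illustrative.
import json

def load_leaderboard_data(path=DATA_DIR):
    """Read the raw leaderboard records from the JSON file at `path`."""
    with open(path, encoding="utf-8") as f:
        return json.load(f)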



MODEL_INFO = ["Model Name", "Language Model"]
AVG_INFO = ["Avg. All"]
ME_INFO = ["Method Name", "Language Model"]

# Fixed information for the KE tasks
KE_Data_INFO = ["FewNERD", "FewRel", "InstructIE-en", "MAVEN", "WikiEvents"]

KE_TASK_INFO = ["Avg. All", "FewNERD", "FewRel", "InstructIE-en", "MAVEN", "WikiEvents"]
KE_CSV_DIR = "./ke_files/result-kgc.csv"  # path to the KE results file (a CSV, despite the "DIR" name)
DATA_COLUMN_NAMES = ["locality", "labels", "concept", "text"]
KE_TABLE_INTRODUCTION = """In the table below, we summarize the performance of every model on each task. We use the F1 score (%) as the primary evaluation metric for each task.
    """
RESULT_COLUMN_NAMES = ["DataSet", "Metric", "Metric", "ICE", "AdaLoRA", "MEND", "ROME", "MEMIT", "FT-L", "FT"]
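
# Hypothetical usage sketch (an assumption, not original code): reading the KE
# results CSV and relabeling its columns for display. Assumes the CSV has
# exactly len(RESULT_COLUMN_NAMES) columns and that pandas is available.
def load_ke_results(path=KE_CSV_DIR):
    import pandas as pd  # local import so the module itself stays dependency-light
    df = pd.read_csv(path)
    df.columns = RESULT_COLUMN_NAMES  # duplicate labels (e.g. "Metric") are allowed here
    return df
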
DATA_STRUCT = """
| Datasets | ZsRE   | Wiki<sub>recent</sub> | Wiki<sub>counterfact</sub> | WikiBio |
|----------|--------|-----------------------|----------------------------|---------|
| Train    | 10,000 | 570                   | 1,455                      | 592     |
| Test     | 1,230  | 1,266                 | 885                        | 1,392   |
"""
TITLE = """# KnowEdit: A Dataset for Knowledge Editing"""

BACKGROUND = """
Large Language Models (LLMs) have shown extraordinary capabilities in understanding and generating text that closely mirrors human communication. However, a primary limitation lies in their significant computational demands during training, which arise from their extensive parameterization. There is increasing interest in efficient, lightweight methods for on-the-fly model modification. To this end, recent years have seen a burgeoning of knowledge-editing techniques for LLMs, which aim to efficiently modify an LLM's behavior within specific domains while preserving overall performance across various inputs.
"""

LEADERBORAD_INTRODUCTION = """
     This is the KnowEdit dataset for knowledge editing. It contains six tasks: ZsRE, Wiki<sub>recent</sub>, Wiki<sub>counterfact</sub>, WikiBio, ConvSent, and Sanitation. This repo covers the first four tasks; the data for ConvSent and Sanitation can be obtained from their original papers.
    """
DATA_SCHEMA = """{
  "subject": "xxx",
  "target_new": "xxx",
  "prompt": "xxx",
  "portability": {
      "Logical_Generalization": [],
      ...
  },
  "locality": {
      "Relation_Specificity": [],
      ...
  }
}"""



CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""@article{tan2023evaluation,
  author       = {Yiming Tan and Dehai Min and Yu Li and Wenbo Li and Nan Hu and Yongrui Chen and Guilin Qi},
  title        = {Evaluation of ChatGPT as a Question Answering System for Answering Complex Questions},
  journal      = {arXiv preprint arXiv:2303.07992},
  year         = {2023}
}
@article{gui2023InstructIE,
  author       = {Honghao Gui and Jintian Zhang and Hongbin Ye and Ningyu Zhang},
  title        = {InstructIE: {A} Chinese Instruction-based Information Extraction Dataset},
  journal      = {arXiv preprint arXiv:2305.11527},
  year         = {2023}
}
@article{yao2023edit,
  author       = {Yunzhi Yao and Peng Wang and Bozhong Tian and Siyuan Cheng and Zhoubo Li and Shumin Deng and Huajun Chen and Ningyu Zhang},
  title        = {Editing Large Language Models: Problems, Methods, and Opportunities},
  journal      = {arXiv preprint arXiv:2305.13172},
  year         = {2023}
}
"""