Commit d0dad77 (1 parent: b641054) by asahi417

fix readme

Files changed (1)
  1. add_new_analogy.py +78 -78
add_new_analogy.py CHANGED
@@ -14,81 +14,81 @@ from datasets import load_dataset
 # f.write("\n".join([json.dumps(i) for i in analogy_data]))
 
 
-# create analogy from `relbert/t_rex_relational_similarity`
-data = load_dataset("relbert/t_rex_relational_similarity", "filter_unified.min_entity_1_max_predicate_100", split="test")
-df = data.to_pandas()
-df['negatives'] = [list(chain(
-    *[[y.tolist() for y in x.tolist()] for x in df[df.relation_type != i]['positives'].tolist()] +
-    [[y.tolist() for y in x.tolist()] for x in df[df.relation_type == i]['negatives'].tolist()])) for i in
-    df['relation_type']]
-analogy_data = []
-for _, i in df.iterrows():
-    if len(i['positives']) < 2:
-        continue
-    for m, (q, c) in enumerate(combinations(i['positives'], 2)):
-        if m > 5:
-            break
-        negative = i['negatives']
-        for n in range(6):
-            seed(n)
-            shuffle(negative)
-            analogy_data.append({
-                "stem": q.tolist(), "choice": [c.tolist()] + negative[:5], "answer": 0, "prefix": i["relation_type"]
-            })
-os.makedirs("dataset/t_rex_relational_similarity", exist_ok=True)
-with open("dataset/t_rex_relational_similarity/test.jsonl", "w") as f:
-    f.write("\n".join([json.dumps(i) for i in analogy_data]))
-
-data = load_dataset("relbert/t_rex_relational_similarity", "filter_unified.min_entity_4_max_predicate_100", split="validation")
-df = data.to_pandas()
-df['negatives'] = [list(chain(
-    *[[y.tolist() for y in x.tolist()] for x in df[df.relation_type != i]['positives'].tolist()] +
-    [[y.tolist() for y in x.tolist()] for x in df[df.relation_type == i]['negatives'].tolist()])) for i in
-    df['relation_type']]
-analogy_data = []
-for _, i in df.iterrows():
-    if len(i['positives']) < 5:
-        continue
-    for m, (q, c) in enumerate(combinations(i['positives'], 2)):
-        if m > 5:
-            break
-        negative = i['negatives']
-        for n in range(3):
-            seed(n)
-            shuffle(negative)
-            analogy_data.append({
-                "stem": q.tolist(), "choice": [c.tolist()] + negative[:5], "answer": 0, "prefix": i["relation_type"]
-            })
-os.makedirs("dataset/t_rex_relational_similarity", exist_ok=True)
-with open("dataset/t_rex_relational_similarity/valid.jsonl", "w") as f:
-    f.write("\n".join([json.dumps(i) for i in analogy_data]))
-
-# create analogy from `relbert/conceptnet_relational_similarity`
-for s in ['test', 'validation']:
-    data = load_dataset("relbert/conceptnet_relational_similarity", split=s)
-    df = data.to_pandas()
-    df['negatives'] = [list(chain(
-        *[[y.tolist() for y in x.tolist()] for x in df[df.relation_type != i]['positives'].tolist()] +
-        [[y.tolist() for y in x.tolist()] for x in df[df.relation_type == i]['negatives'].tolist()])) for i in
-        df['relation_type']]
-
-    analogy_data = []
-
-    for _, i in df.iterrows():
-
-        if len(i['positives']) < 2:
-            continue
-        for m, (q, c) in enumerate(combinations(i['positives'], 2)):
-            if m > 5:
-                break
-            negative = i['negatives']
-            for n in range(6):
-                seed(n)
-                shuffle(negative)
-                analogy_data.append({
-                    "stem": q.tolist(), "choice": [c.tolist()] + negative[:5], "answer": 0, "prefix": i["relation_type"]
-                })
-    print(len(analogy_data))
-    os.makedirs("dataset/conceptnet_relational_similarity", exist_ok=True)
-    with open(f"dataset/conceptnet_relational_similarity/{s if s == 'test' else 'valid'}.jsonl", "w") as f:
-        f.write("\n".join([json.dumps(i) for i in analogy_data]))
+# # create analogy from `relbert/t_rex_relational_similarity`
+# data = load_dataset("relbert/t_rex_relational_similarity", "filter_unified.min_entity_1_max_predicate_100", split="test")
+# df = data.to_pandas()
+# df['negatives'] = [list(chain(
+#     *[[y.tolist() for y in x.tolist()] for x in df[df.relation_type != i]['positives'].tolist()] +
+#     [[y.tolist() for y in x.tolist()] for x in df[df.relation_type == i]['negatives'].tolist()])) for i in
+#     df['relation_type']]
+# analogy_data = []
+# for _, i in df.iterrows():
+#     if len(i['positives']) < 2:
+#         continue
+#     for m, (q, c) in enumerate(combinations(i['positives'], 2)):
+#         if m > 5:
+#             break
+#         negative = i['negatives']
+#         for n in range(6):
+#             seed(n)
+#             shuffle(negative)
+#             analogy_data.append({
+#                 "stem": q.tolist(), "choice": [c.tolist()] + negative[:5], "answer": 0, "prefix": i["relation_type"]
+#             })
+# os.makedirs("dataset/t_rex_relational_similarity", exist_ok=True)
+# with open("dataset/t_rex_relational_similarity/test.jsonl", "w") as f:
+#     f.write("\n".join([json.dumps(i) for i in analogy_data]))
+#
+# data = load_dataset("relbert/t_rex_relational_similarity", "filter_unified.min_entity_4_max_predicate_100", split="validation")
+# df = data.to_pandas()
+# df['negatives'] = [list(chain(
+#     *[[y.tolist() for y in x.tolist()] for x in df[df.relation_type != i]['positives'].tolist()] +
+#     [[y.tolist() for y in x.tolist()] for x in df[df.relation_type == i]['negatives'].tolist()])) for i in
+#     df['relation_type']]
+# analogy_data = []
+# for _, i in df.iterrows():
+#     if len(i['positives']) < 5:
+#         continue
+#     for m, (q, c) in enumerate(combinations(i['positives'], 2)):
+#         if m > 5:
+#             break
+#         negative = i['negatives']
+#         for n in range(3):
+#             seed(n)
+#             shuffle(negative)
+#             analogy_data.append({
+#                 "stem": q.tolist(), "choice": [c.tolist()] + negative[:5], "answer": 0, "prefix": i["relation_type"]
+#             })
+# os.makedirs("dataset/t_rex_relational_similarity", exist_ok=True)
+# with open("dataset/t_rex_relational_similarity/valid.jsonl", "w") as f:
+#     f.write("\n".join([json.dumps(i) for i in analogy_data]))
+#
+# # create analogy from `relbert/conceptnet_relational_similarity`
+# for s in ['test', 'validation']:
+#     data = load_dataset("relbert/conceptnet_relational_similarity", split=s)
+#     df = data.to_pandas()
+#     df['negatives'] = [list(chain(
+#         *[[y.tolist() for y in x.tolist()] for x in df[df.relation_type != i]['positives'].tolist()] +
+#         [[y.tolist() for y in x.tolist()] for x in df[df.relation_type == i]['negatives'].tolist()])) for i in
+#         df['relation_type']]
+#
+#     analogy_data = []
+#
+#     for _, i in df.iterrows():
+#
+#         if len(i['positives']) < 2:
+#             continue
+#         for m, (q, c) in enumerate(combinations(i['positives'], 2)):
+#             if m > 5:
+#                 break
+#             negative = i['negatives']
+#             for n in range(6):
+#                 seed(n)
+#                 shuffle(negative)
+#                 analogy_data.append({
+#                     "stem": q.tolist(), "choice": [c.tolist()] + negative[:5], "answer": 0, "prefix": i["relation_type"]
+#                 })
+#     print(len(analogy_data))
+#     os.makedirs("dataset/conceptnet_relational_similarity", exist_ok=True)
+#     with open(f"dataset/conceptnet_relational_similarity/{s if s == 'test' else 'valid'}.jsonl", "w") as f:
+#         f.write("\n".join([json.dumps(i) for i in analogy_data]))
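For reference, each record the script wrote (before this commit commented the code out) is one JSON object per line with "stem", "choice", "answer", and "prefix" fields. A minimal sketch of reading one of those files back, assuming the JSONL was already generated under the dataset/ path used in the script:

import json

# Load the conceptnet test split written by the (now commented-out) script above;
# the path is taken from that script and may need adjusting.
with open("dataset/conceptnet_relational_similarity/test.jsonl") as f:
    records = [json.loads(line) for line in f if line.strip()]

example = records[0]
print(example["stem"])    # query word pair
print(example["choice"])  # candidate pairs; index 0 is the positive pair
print(example["answer"])  # always 0 in this construction
print(example["prefix"])  # relation type the positive pair was drawn from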