Iker committed on
Commit
4130fa3
1 Parent(s): 85b17c6

Delete Multilingual-Opinion-Target-Extraction.py

Browse files
Multilingual-Opinion-Target-Extraction.py DELETED
@@ -1,169 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """Multilingual Opinion Target Extraction: A Parallel Corpus for Multilingual Opinion Extraction"""
18
-
19
- import datasets
20
-
21
-
22
# Module-level logger following the datasets-library convention.
logger = datasets.logging.get_logger(__name__)


# BibTeX entry for the paper that introduced this corpus (Findings of EMNLP 2022).
_CITATION = """\
@inproceedings{garcia-ferrero-etal-2022-model,
title = "Model and Data Transfer for Cross-Lingual Sequence Labelling in Zero-Resource Settings",
author = "Garc{\'\i}a-Ferrero, Iker and
Agerri, Rodrigo and
Rigau, German",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-emnlp.478",
pages = "6403--6416",
}
"""

# Short human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
SemEval-2016 Task 5: Aspect Based Sentiment Analysis data (https://alt.qcri.org/semeval2016/task5/) translated
into Spanish, French, Russian and Turkish using DeepL. The annotations have been manually
projected from English to the target languages.
"""

# Base URL of the repository hosting the TSV data files; split-specific
# paths are appended to this in _split_generators.
_URL = "https://github.com/ikergarcia1996/Easy-Label-Projection/tree/main/data/absa_datasets/"
48
-
49
-
50
class MOTEConfig(datasets.BuilderConfig):
    """BuilderConfig for mOTE (Multilingual Opinion Target Extraction)."""

    def __init__(self, **kwargs):
        """BuilderConfig for mOTE.

        Args:
            **kwargs: keyword arguments forwarded to super
                (e.g. ``name``, ``version``, ``description``).
        """
        # Python 3 zero-argument super() replaces the legacy
        # two-argument form super(MOTEConfig, self).
        super().__init__(**kwargs)
59
-
60
-
61
class MOTE(datasets.GeneratorBasedBuilder):
    """MOTE dataset: opinion-target extraction (SemEval-2016 ABSA) in five languages."""

    # One config per language: English is the original SemEval data, the other
    # four are manual annotation projections of machine-translated text.
    BUILDER_CONFIGS = [
        MOTEConfig(
            name=lang_code,
            version=datasets.Version("1.0.0"),
            description=f"MOTEConfig {lang_name} dataset",
        )
        for lang_code, lang_name in [
            ("en", "English"),
            ("es", "Spanish"),
            ("fr", "French"),
            ("ru", "Russian"),
            ("tr", "Turkish"),
        ]
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # BIO tags marking opinion-target spans.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-TARGET",
                                "I-TARGET",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://arxiv.org/abs/2210.12623",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-language train/test TSV files and declare the splits.

        Args:
            dl_manager: datasets download manager used to fetch the files.

        Returns:
            A list with the TRAIN and TEST SplitGenerators.
        """
        if self.config.name == "en":
            # English files live under original/; other languages are
            # projections from English under manual_projections/en2<lang>/.
            urls_to_download = {
                "train": f"{_URL}original/en/en.absa.train.tsv",
                "test": f"{_URL}original/en/en.absa.test.tsv",
            }
        else:
            urls_to_download = {
                "train": f"{_URL}manual_projections/en2{self.config.name}/{self.config.name}.absa.train.tsv",
                "test": f"{_URL}manual_projections/en2{self.config.name}/{self.config.name}.absa.test.tsv",
            }

        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs from a CoNLL-style file.

        Each non-blank line holds ``<token> <tag>`` (whitespace-separated);
        blank lines separate sentences.

        Args:
            filepath: path to the downloaded TSV file.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                line = line.strip()
                # After strip() a blank line is exactly ""; the original
                # extra comparison `line == "\n"` was dead code.
                if not line:
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    splits = line.split()
                    tokens.append(splits[0].strip())
                    ner_tags.append(splits[1].strip())
            # Flush the last sentence if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }