diff --git a/README.md b/README.md index b25779b9db29949bcd960cfe303ae9505bc7cefe..a9db684a4a4e9479a3cdd231d0bd8706a08cf04c 100644 --- a/README.md +++ b/README.md @@ -1,347 +1,13 @@ -# DialogZoo - -To replicate data construction, three steps are required: -* Download data: ```bash scripts/download.sh``` -* Convert origin data into our unified format: ```bash scripts/convert_to_unified.sh``` +# Code directory ``` -{ - # Optional values: `single` or `multi`. Indicates whether it is a single-turn or multi-turn dialogue. - "turn": str, - - # The domains involved in the dialogue (a list because some dialogues involve multiple domains). - "domain": [], - - # The language of the dialogue, based on the original dataset annotations (e.g., en, fr, etc.). - "locale": str, - - # The dialogue, represented as a list where each element is a dictionary for a single turn. - "dialog": [ - { - # The roles involved in each turn. Some datasets may have multiple roles per turn, so it's a list. - # For datasets without role annotations: - # * Use `ROLE` for single-turn data. - # * Use `ROLE1`, `ROLE2`, etc., for multi-turn data. - "roles": [str, ...], - - # The text of the current turn. - "utterance": str, - - # Used for the "answer" in QA tasks. - "start": int, - "end": int, - "dialog_turn": int - - # Rewritten text corresponding to the current turn. - "rewritten": str, - - # Dialogue state, represented as a list where each element includes: - # Domain: Some datasets constrain slot-value pairs within specific domains. - # Intent: Some datasets constrain slot-value pairs within specific intents. - # Slot-value pairs: A list where each element includes a slot and its corresponding values. - # Slot name: A string. - # Values: A list where a slot may have multiple values. - # Each value includes four parts: the value itself, the normalized value, - # the character index in the current turn's text, and more. - # Relation: Some slots are equal to a value, while others are greater than a value. - # Defaults to "equal" if not specified. - # Requested slots: A list of slots that need to be queried but are not filled in the current state. - "belief_state": [ - { - # Intent - "intent": str, - # Slot-value pairs - "informed_slot_value_table": [ - { - # Slot name - "slot": str, - # Values - "values": [{ - # Actual value - "value": str, - # Normalized value - "cononical_value": str - }, ...], - # Slot-value relation - "relation": str, - }, - ... - ], - # Requested slots - "requested_slots": [], - # Domain - "domain": str, - }, ... - ], - - # Dialogue actions, represented as a list where each element includes: - # Domain: Some datasets constrain slot-value pairs within specific domains. - # Action: The actions involved in the current turn. - # Slot-value pairs: Same as in dialogue state. - "dialog_acts": [ - { - # Action - "act": str, - # Slot-value pairs - "slot_value_table": [ - { - # Slot name - "slot": str, - # Slot-value relation - "relation": str, - # Values - "values": [ - { - # Actual value - "value": str, - # Normalized value - "cononical_value": str, - # Start position - "start": int, - # End position - "end": int, - },... - ] - }, - ... - ], - # Domain - "domain": str, - }, - ... 
- ], - - # Slot filling - "slots_to_fill": { - "intent": str, - "slot_value_table": [ - { - "slot": str, - "values": [ - { - "value": str, - "start": int, - "end": int - } - ], - "relation": str, # '=', '<=', and so on - } - ] - }, - - # Named entity recognition - "named_entity_recognition": [ - { - "type": str, - "values": [ - { - "value": str, - "start": int, - "end": int - }, ... - ] - }, ... - ], - - "characters": [ - { - "value": str, - "start": int, - "end": int - } - ] - - # Intent detection - "active_intents": [str], - - # Query - "query" { - ... - }, - - # Query result - "querying_result": { - ... - }, - - # Recorded satisfied main items - "main_items": [], - - # Aspect Sentiment Triplet Extraction task, represented as a list where each element includes three parts: - # Target entity. - # Related sentiment. - # Words reflecting the sentiment. - "aspects": [ - { - # Target entity - "target": { - # Entity value - "value": str, - # Start position in the current turn's text - "start": int, - # End position in the current turn's text - "end": int - }, - - # Category of the target entity - "category": str, - - # Words reflecting the sentiment - "opinion": { - # Sentiment word - "value": str, - # Start position in the current turn's text - "start": int, - # End position in the current turn's text - "end": int - }, - # Related sentiment - "sentiment": str - } - ], - - "emotions": [ - { - "emotion": str, - "sentiment": "positive", "negative", or "ambiguous", - "evidences": [ - { - "turn": int, - "span": str, - "start": int, - "end": int - } - ], - "evidence_types": [str] - } - ], - - "kg_label": str, - - # Knowledge that may be required for each turn, used to select knowledge. - "knowledge_to_select": str, - - # SQL - "sql": str, - - # Rewritten text - "rewritten": str, - - "roles_to_select": [str], - }, - - ], - - # Summary derived from the entire dialogue. - "summary": str, - - # Entity relations determined from the entire dialogue. - "instance_relations": [ - { - "instance1": str, - "instance2": str, - "relations": [ - { - "relation": str, - "trigger": str - }, ... - ] - }, ... - ] - - # Role relations determined from the entire dialogue. - "role_relations": [ - { - "turn": int, - "relation": str - } - ], - - # Used in FriendsPersona to determine a character's persona based on the entire dialogue. - "role_personas": [ - { - "name": str, - "personas": [ - { - "persona": str, - "sentiment": int - }, ... - ] - } - ], - - # External knowledge required for the dialogue. - "knowledge": { - # `text`, `persona`, `kg`, or `schema`. - "type": str, - - # For `text`. - "value": str, - - # For `persona`, persona of all roles, used for personachat. - "value": [ - { - # Role name, matching the dialogue turn. - "role": str, - - # Persona description, which may include several sentences. - "description": [] - }, - ... - ] - - # For `kg`. - "value": { - # `directed` or `undirected`. - "direction": str, - - # Graph. - "graph": [ - { - # Source node. - "source": str, - - # Target node. - "target": str, - - # Relation. - "relation": str - }, - ... - ] - } - - # For `schema`. - "value": { - ... - } - - # For `dialogue`. - "value": { - "dialog": [], - "relations": [] - } - - # For `wiki`. - "value": { - ... - } - - # For `sql`. - "value": [ - { - "turn": int, - "sql": str, - "result": ... - }, ... - ], - - # For dialogues based on specific article excerpts, this field indicates the article and section titles. - "value": { - "article title": str, - "section title": str - }, - } -} - +. 
+|-- pretrain: pre-training package +|-- utils +| |-- data +| |-- logger +| |-- model +| |-- tokenizer +| `-- trainer +|-- 😀[TODO: some other tasks directories]😀 +`-- README.md ``` -* Linearize: ```bash scripts/convert_to_seq.sh``` \ No newline at end of file diff --git a/src/ABSA.py b/src/ABSA.py new file mode 100644 index 0000000000000000000000000000000000000000..86dccd55348388abafb16227e0cc5c3d4c24945c --- /dev/null +++ b/src/ABSA.py @@ -0,0 +1,41 @@ +import sys +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + ABSA_TERM_CATEGORY_SENTIMENT, ABSA_TERM_OPINION_SENTIMENT, ABSA_CATEGORY_SENTIMENT, ABSA_TERM_SENTIMENT +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + None_knowledge, + +) +from preprocessor.label_funs import ( + extract_aspects_wrapper, +) +import sys + +if __name__ == "__main__": + TASK = ABSA_TERM_SENTIMENT + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Extract all the aspects." + ), + # knowledge_func=concat_list_knowledge_wrapper("person 2 persona: ", " | "), + knowledge_func=None_knowledge, + label_func=extract_aspects_wrapper(" | ", ", "), + ) + ) + + serial_proc.launch() + + \ No newline at end of file diff --git a/src/CC.py b/src/CC.py new file mode 100644 index 0000000000000000000000000000000000000000..7d70f7a990b0822601d660061b2e2722a3966fd1 --- /dev/null +++ b/src/CC.py @@ -0,0 +1,42 @@ +import sys +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + CHIT_CHAT +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + concat_list_knowledge_wrapper, + +) +from preprocessor.label_funs import ( + extract_turn_utterance, +) +import sys + +if __name__ == "__main__": + TASK = CHIT_CHAT + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Response based on the dialogue context and given self persona" + ), + knowledge_func=concat_list_knowledge_wrapper("person 2 persona: ", " | "), + # knowledge_func=None_knowledge, + label_func=extract_turn_utterance, + roles_to_build_example=[["person 2"]], + ) + ) + + serial_proc.launch() + + \ No newline at end of file diff --git a/src/CI.py b/src/CI.py new file mode 100644 index 0000000000000000000000000000000000000000..dc88c3140a1f54ee379b2c9c9ac30b04780509ca --- /dev/null +++ b/src/CI.py @@ -0,0 +1,43 @@ +import sys +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + CHARACTER_IDENTIFICATION +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + None_knowledge, + +) +from preprocessor.label_funs import ( + extract_characters, +) +from preprocessor.process_turn_funcs import introduce_mention_to_utterance_wrapper +import sys + +if __name__ == "__main__": + TASK = CHARACTER_IDENTIFICATION + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + + serial_proc = 
SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Predict all characters mentioned in the given dialogue marked by [ mention ] tag based on the context." + ), + knowledge_func=None_knowledge, + # knowledge_func=None_knowledge, + label_func=extract_characters, + all_turns_process_func=introduce_mention_to_utterance_wrapper(" [ ", " ] "), + ) + ) + + serial_proc.launch() + + \ No newline at end of file diff --git a/src/DCRG.py b/src/DCRG.py new file mode 100644 index 0000000000000000000000000000000000000000..700b26bbf55e1ec21044e831776a5f7e4cc872ac --- /dev/null +++ b/src/DCRG.py @@ -0,0 +1,144 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION, + DOCUMENT_GROUNDED_CONVERSATION, + MULTI_REF_SEP, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + extract_dialogue_knowledge_wrapper, + origin_knowledge, + None_knowledge, + extract_kg_knowledge_wrapper, + extract_turn_knowledge_wrapper, +) +from preprocessor.label_funs import ( + extract_turn_utterance, +) +import sys + +if __name__ == "__main__": + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + TASK = DOCUMENT_GROUNDED_CONVERSATION + + if len(sys.argv) <= 3: + based_on = "dialogue" + else: + based_on = sys.argv[3] + + if len(sys.argv) < 5: + if based_on == "turn-document": + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Response based on the dialogue context and given knowledge" + ), + # knowledge_func=extract_kg_knowledge_wrapper(": ", " | ", "; ", " "), + # knowledge_func=extract_dialogue_knowledge_wrapper(": ", " | ", ", "), + # knowledge_func=None_knowledge, + knowledge_func=origin_knowledge, + turn_knowledge_func=extract_turn_knowledge_wrapper( + ": ", " | ", ", " + ), + label_func=extract_turn_utterance, + roles_to_build_example=[["user1"], ["user2"]], + # dev_and_test_roles_to_build_example=[["user2"]], + roles_in_history=None, + multi_ref_sep=None, + ) + ) + elif based_on == "document": + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Response based on the dialogue context and given knowledge" + ), + # knowledge_func=extract_kg_knowledge_wrapper(": ", " | ", "; ", " "), + knowledge_func=extract_dialogue_knowledge_wrapper( + ": ", " | ", ", " + ), + # knowledge_func=None_knowledge, + # knowledge_func=origin_knowledge, + label_func=extract_turn_utterance, + roles_to_build_example=[ + ["third-person"], + ["Listener"], + ["Speaker"], + ], + dev_and_test_roles_to_build_example=[ + ["third-person"], + ["Listener"], + ], + ) + ) + elif based_on == "None": + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Response based on the dialogue context and given knowledge" + ), + knowledge_func=None_knowledge, + label_func=extract_turn_utterance, + roles_to_build_example=[["SYSTEM"]], + ) + ) + else: + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + 
output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Response based on the dialogue context and given knowledge" + ), + knowledge_func=extract_kg_knowledge_wrapper(": ", " | ", "; ", " "), + # knowledge_func=extract_dialogue_knowledge_wrapper(": ", " | ", ", "), + # knowledge_func=None_knowledge, + label_func=extract_turn_utterance, + roles_to_build_example=[["SYSTEM"], ["USER"]], + dev_and_test_roles_to_build_example=[["SYSTEM"]], + ) + ) + else: + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Response based on the dialogue context and given knowledge" + ), + # knowledge_func=extract_kg_knowledge_wrapper(": ", " | ", "; ", " "), + knowledge_func=extract_dialogue_knowledge_wrapper(": ", " | ", ", "), + label_func=extract_turn_utterance, + roles_to_build_example=[["SYSTEM"]], + roles_in_history=[["USER"]], + multi_ref_sep=MULTI_REF_SEP, + ) + ) + + serial_proc.launch() diff --git a/src/DS.py b/src/DS.py new file mode 100644 index 0000000000000000000000000000000000000000..889bfce5b2a60a1819393aab1d3606b1cbb07600 --- /dev/null +++ b/src/DS.py @@ -0,0 +1,39 @@ +import sys +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + DIALOGUE_SUMMARY, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + None_knowledge, +) +from preprocessor.label_funs import ( + extract_summary, +) +import sys + +if __name__ == "__main__": + TASK = DIALOGUE_SUMMARY + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Summarize the dialogue." + ), + knowledge_func=None_knowledge, + label_func=extract_summary, + ) + ) + + serial_proc.launch() + + \ No newline at end of file diff --git a/src/DST.py b/src/DST.py new file mode 100644 index 0000000000000000000000000000000000000000..aa24b68d2cdbcdc913c07a356dfe2dd60159e38e --- /dev/null +++ b/src/DST.py @@ -0,0 +1,43 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + DIALOGUE_STATE_TRACKING, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import None_knowledge, extract_turn_domains_wrapper +from preprocessor.label_funs import ( + extract_belief_state_wrapper, +) +import os, shutil + +if __name__ == "__main__": + TASK = DIALOGUE_STATE_TRACKING + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Generate the dialogue state based on the given dialogue context." 
+ ), + knowledge_func=None_knowledge, + label_func=extract_belief_state_wrapper(", ", " | ", "; ", ": "), + roles_to_build_example=[["USER"]], + ) + ) + + serial_proc.launch() + + for split in ["train", "dev", "test"]: + if os.path.isfile(os.path.join(input_data_path, f"{split}_ontology.json")): + shutil.copyfile( + os.path.join(input_data_path, f"{split}_ontology.json"), + os.path.join(output_data_path, f"{split}_ontology.json"), + ) diff --git a/src/DT.py b/src/DT.py new file mode 100644 index 0000000000000000000000000000000000000000..aaf9bc9e992484479f28439b3a8df372415d4b4b --- /dev/null +++ b/src/DT.py @@ -0,0 +1,41 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + DATA_TO_TEXT, MULTI_REF_SEP +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + extract_dict_knowledge_wrapper, +) +from preprocessor.label_funs import ( + extract_turn_utterance, +) +import sys + +if __name__ == "__main__": + # 2. Emotion Recognition + TASK = DATA_TO_TEXT + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Generate the corresponding text based on the given knowledge." + ), + knowledge_func=extract_dict_knowledge_wrapper(": ", " | "), + label_func=extract_turn_utterance, + roles_in_history=[], + multi_ref_sep=MULTI_REF_SEP + ) + ) + + serial_proc.launch() diff --git a/src/ER.py b/src/ER.py new file mode 100644 index 0000000000000000000000000000000000000000..99a5eb91677a210a904e6b3aa1a9865a0ef02db5 --- /dev/null +++ b/src/ER.py @@ -0,0 +1,38 @@ +import sys +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + EMOTION_RECOGNITION, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + None_knowledge, +) +from preprocessor.label_funs import ( + extract_turn_emotion_wrapper, +) +import sys + +if __name__ == "__main__": + # 2. Emotion Recognition + TASK = EMOTION_RECOGNITION + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Recognize correct emotions based on the dialogue context." 
+ ), + knowledge_func=None_knowledge, + label_func=extract_turn_emotion_wrapper(", "), + ) + ) + + serial_proc.launch() \ No newline at end of file diff --git a/src/ID.py b/src/ID.py new file mode 100644 index 0000000000000000000000000000000000000000..791584d843a10b54864f87d8f1bb6677767f3cef --- /dev/null +++ b/src/ID.py @@ -0,0 +1,32 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import INTENT_DETECTION, MULTI_REF_SEP +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import None_knowledge +from preprocessor.label_funs import ( + extract_intents_wrapper, +) + +if __name__ == "__main__": + TASK = INTENT_DETECTION + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Detect the intent based on the given dialogue context." + ), + knowledge_func=None_knowledge, + label_func=extract_intents_wrapper(" | "), + ) + ) + + serial_proc.launch() diff --git a/src/MCQA.py b/src/MCQA.py new file mode 100644 index 0000000000000000000000000000000000000000..2e90190e543bc5d5faee3c292aae2ec410a3d26d --- /dev/null +++ b/src/MCQA.py @@ -0,0 +1,41 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + MULTIPLE_CHOICE_QUESTION_ANSWERING, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + # extract_dict_knowledge_wrapper + extract_dialogue_knowledge_wrapper, + # None_knowledge, +) +from preprocessor.label_funs import ( + extract_options, +) + +if __name__ == "__main__": + TASK = MULTIPLE_CHOICE_QUESTION_ANSWERING + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + sort_or_not = len(sys.argv) > 3 + + prompt = "Generate the best choice." + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper(prompt), + knowledge_func=extract_dialogue_knowledge_wrapper(": ", " | ", ", "), + # knowledge_func=extract_dict_knowledge_wrapper(": ", " | "), + label_func=extract_options, + ) + ) + + serial_proc.launch() diff --git a/src/MRC.py b/src/MRC.py new file mode 100644 index 0000000000000000000000000000000000000000..bad0ac6c5d32f014cca9f12f38c5c8d4eb07399f --- /dev/null +++ b/src/MRC.py @@ -0,0 +1,35 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import MACHINE_READING_COMPREHENSION, MULTI_REF_SEP +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import extract_dialogue_knowledge_wrapper +from preprocessor.label_funs import ( + extract_turn_utterance, +) + +if __name__ == "__main__": + TASK = MACHINE_READING_COMPREHENSION + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Answer the question based on the given document and dialogue context." 
+ ), + knowledge_func=extract_dialogue_knowledge_wrapper(": ", " | ", ", "), + label_func=extract_turn_utterance, + roles_in_history=[["USER"]], + roles_to_build_example=[["SYSTEM"]], + multi_ref_sep=MULTI_REF_SEP, + ) + ) + + serial_proc.launch() diff --git a/src/NLI.py b/src/NLI.py new file mode 100644 index 0000000000000000000000000000000000000000..dfdd68008a8b9534e580b59774f2f34473027548 --- /dev/null +++ b/src/NLI.py @@ -0,0 +1,33 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + NATURAL_LANGUAGE_INFERENCE, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import extract_dict_knowledge_wrapper +from preprocessor.label_funs import ( + extract_options, +) + +if __name__ == "__main__": + TASK = NATURAL_LANGUAGE_INFERENCE + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper("Generate the better hypothesis."), + knowledge_func=extract_dict_knowledge_wrapper(": ", " | "), + # knowledge_func=None_knowledge, + label_func=extract_options, + ) + ) + + serial_proc.launch() diff --git a/src/QCR.py b/src/QCR.py new file mode 100644 index 0000000000000000000000000000000000000000..6bbc01ba753722708abf6ba6f8e34ab04aa3f871 --- /dev/null +++ b/src/QCR.py @@ -0,0 +1,40 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + QUESTION_IN_CONTEXT_REWRITING, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import extract_dict_knowledge_wrapper, None_knowledge +from preprocessor.label_funs import ( + extrac_rewritten, +) + +if __name__ == "__main__": + TASK = QUESTION_IN_CONTEXT_REWRITING + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + has_knowledge = len(sys.argv) > 3 + + kf = ( + extract_dict_knowledge_wrapper(": ", " | ") if has_knowledge else None_knowledge + ) + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Rewrite the user utterance of current turn based on the given dialogue context." + ), + knowledge_func=kf, + # knowledge_func=None_knowledge, + label_func=extrac_rewritten, + ) + ) + + serial_proc.launch() diff --git a/src/README.md b/src/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a9db684a4a4e9479a3cdd231d0bd8706a08cf04c --- /dev/null +++ b/src/README.md @@ -0,0 +1,13 @@ +# Code directory +``` +. 
+|-- pretrain: pre-training package +|-- utils +| |-- data +| |-- logger +| |-- model +| |-- tokenizer +| `-- trainer +|-- 😀[TODO: some other tasks directories]😀 +`-- README.md +``` diff --git a/src/RRR.py b/src/RRR.py new file mode 100644 index 0000000000000000000000000000000000000000..32b2846cbe7daad85bbe9032a1c6cb469d84669a --- /dev/null +++ b/src/RRR.py @@ -0,0 +1,36 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + ROLE_RELATION_RECOGNITION, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + None_knowledge, +) +from preprocessor.label_funs import ( + extract_role_relation_without_turn_wrapper, +) + +if __name__ == "__main__": + TASK = ROLE_RELATION_RECOGNITION + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Judge the relation of two roles in the given dialogue." + ), + knowledge_func=None_knowledge, + label_func=extract_role_relation_without_turn_wrapper("; "), + ) + ) + + serial_proc.launch() diff --git a/src/SF.py b/src/SF.py new file mode 100644 index 0000000000000000000000000000000000000000..9f5dad3380c70de4a090242cdd6b2f128351f95d --- /dev/null +++ b/src/SF.py @@ -0,0 +1,36 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + SLOT_FILLING, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + None_knowledge, +) +from preprocessor.label_funs import ( + extract_slots_without_intents_wrapper, +) + +if __name__ == "__main__": + TASK = SLOT_FILLING + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Fill all the slots based on the given utterance." + ), + knowledge_func=None_knowledge, + label_func=extract_slots_without_intents_wrapper(", ", "; "), + ) + ) + + serial_proc.launch() diff --git a/src/SP.py b/src/SP.py new file mode 100644 index 0000000000000000000000000000000000000000..d00be5347d98d7f93a6310d73646fe46c07c5577 --- /dev/null +++ b/src/SP.py @@ -0,0 +1,38 @@ +import sys + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + DIALOGUE_SUMMARY, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + None_knowledge, +) +from preprocessor.label_funs import ( + extract_sql, +) +import sys + +if __name__ == "__main__": + TASK = "Semantic Parsing" + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Parse the sentence into intents and slots." 
+ ), + knowledge_func=None_knowledge, + label_func=extract_sql, + ) + ) + + serial_proc.launch() diff --git a/src/T2S.py b/src/T2S.py new file mode 100644 index 0000000000000000000000000000000000000000..d8b2287a2d027d3d6d539cceb4871c124d127c8b --- /dev/null +++ b/src/T2S.py @@ -0,0 +1,54 @@ +import sys +import os + +sys.path.append("modules/preprocess") + +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + TEXT2SQL, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + origin_knowledge, + extract_schema_knowledge_wrapper, +) +from preprocessor.label_funs import ( + extract_sql, +) + +import shutil + +if __name__ == "__main__": + # 8. Text2SQL + TASK = TEXT2SQL + input_data_path = sys.argv[1] + output_data_path = sys.argv[2] + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Parse the SQL based on the given dialogue context and schema." + ), + knowledge_func=origin_knowledge, + turn_knowledge_func=extract_schema_knowledge_wrapper(), + label_func=extract_sql, + ) + ) + + serial_proc.launch() + + shutil.copyfile( + os.path.join(input_data_path, "tables.json"), + os.path.join(output_data_path, "tables.json"), + ) + + if not os.path.exists(os.path.join(output_data_path, "database")): + shutil.copytree( + os.path.join(input_data_path, "database"), + os.path.join(output_data_path, "database"), + ) diff --git a/src/modules/preprocess/__pycache__/config.cpython-312.pyc b/src/modules/preprocess/__pycache__/config.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c95d099528442055a50a6bb2d7ad8d19c8f727a Binary files /dev/null and b/src/modules/preprocess/__pycache__/config.cpython-312.pyc differ diff --git a/src/modules/preprocess/__pycache__/config.cpython-38.pyc b/src/modules/preprocess/__pycache__/config.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c405f5117c074fb458f7b69b1e7add3a2af46e83 Binary files /dev/null and b/src/modules/preprocess/__pycache__/config.cpython-38.pyc differ diff --git a/src/modules/preprocess/__pycache__/const.cpython-312.pyc b/src/modules/preprocess/__pycache__/const.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a2f93ce0f0282e6266b5cacca093a0d60409281 Binary files /dev/null and b/src/modules/preprocess/__pycache__/const.cpython-312.pyc differ diff --git a/src/modules/preprocess/__pycache__/const.cpython-38.pyc b/src/modules/preprocess/__pycache__/const.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adbc54e3548b4c236588a0125f54126281f32305 Binary files /dev/null and b/src/modules/preprocess/__pycache__/const.cpython-38.pyc differ diff --git a/src/modules/preprocess/__pycache__/logger.cpython-312.pyc b/src/modules/preprocess/__pycache__/logger.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abb95b1dde69631b1244d06c228e12a5f7510a25 Binary files /dev/null and b/src/modules/preprocess/__pycache__/logger.cpython-312.pyc differ diff --git a/src/modules/preprocess/__pycache__/logger.cpython-38.pyc b/src/modules/preprocess/__pycache__/logger.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..deaff30d5ffdcff6114aecd6431161326ded18ba Binary files /dev/null and b/src/modules/preprocess/__pycache__/logger.cpython-38.pyc 
differ diff --git a/src/modules/preprocess/config.py b/src/modules/preprocess/config.py new file mode 100644 index 0000000000000000000000000000000000000000..b9cf73dff8a4c290bbcc74adc054783c46683a71 --- /dev/null +++ b/src/modules/preprocess/config.py @@ -0,0 +1,68 @@ +""" +Base config supporting save and load. Refer to https://github.com/huggingface/transformers/blob/main/src/transformers/configuration_utils.py. +Author: md +""" + +from typing import Dict, Any +import copy +import json + + +class BaseConfig(object): + def __init__( + self, + logger_name: str = None, + log_file: str = None, + log_mode: str = "a", + formatter: str = "%(asctime)s | %(levelname)s | %(message)s", + ) -> None: + """ + params: + ------ + + logger_name: logger name + log_file: the file to ouput log. If `None`, output to stdout + log_mode: mode to write to the log file, `a` is appending. + formatter: logging formatter. + """ + self.logger_name = logger_name + self.log_file = log_file + self.log_mode = log_mode + self.formatter = formatter + + def to_json_string(self) -> Dict[str, Any]: + output = self.to_dict() + return json.dumps(output) + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. + + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. + """ + output = copy.deepcopy(self.__dict__) + # if "_auto_class" in output: + # del output["_auto_class"] + + return output + + def from_dict(self, config_dict: Dict[str, Any]) -> None: + self.__dict__.update(config_dict) + + def save(self, save_path: str, indent: int = 4) -> None: + with open(save_path, "w") as writer: + json.dump(self.to_dict(), writer, indent=indent) + + def load(self, load_path: str) -> None: + with open(load_path, "r") as reader: + self.__dict__.update(json.load(reader)) + + +if __name__ == "__main__": + config = BaseConfig() + + config.from_dict({"a": 1, "b": 2, "c": "test", "d": True, "e": 3.2}) + config.save("../../test/test1.json") + config.load("../../test/test1.json") + config.save("../../test/test2.json") diff --git a/src/modules/preprocess/const.py b/src/modules/preprocess/const.py new file mode 100644 index 0000000000000000000000000000000000000000..a25b59cafc63ec5bc480be88f506c6e0b9b19f0c --- /dev/null +++ b/src/modules/preprocess/const.py @@ -0,0 +1,73 @@ +""" +Some constant variables. 
+Author: md +""" + +# The split names +TRAIN_SPLIT = "train" +DEV_SPLIT = "dev" +TEST_SPLIT = "test" + +# Universal dialogue format keywords +DIALOG = "dialog" +ROLES = "roles" +TARGET = "target" +SUMMARY = "summary" +KNOWLEDGE = "knowledge" +UTTERANCE = "utterance" +EMOTIONS = "emotions" +EMOTION = "emotion" +VALUE = "value" +ASPECTS = "aspects" +CATEGORY = "category" +OPINION = "opinion" +SENTIMENT = "sentiment" +CHARACTERS = "characters" +START = "start" +END = "end" +BELIEF_STATE = "belief_state" +DOMAIN = "domain" +INFORMED_SLOT_VALUE_TABLE = "informed_slot_value_table" +SLOT = "slot" +VALUES = "values" +RELATION = "relation" +KNOWLEDGE_TO_SELECT = "knowledge_to_select" +SQL = "sql" +SLOT_VALUE_TABLE = "slot_value_table" +SLOTS_TO_FILL = "slots_to_fill" +ROLE_RELATIONS = "role_relations" +REWRITTEN = "rewritten" +ROLES_TO_SELECT = "roles_to_select" +ACTIVE_INTENTS = "active_intents" + + +# TASK NAMES +DIALOGUE_SUMMARY = "Dialogue Summary" +EMOTION_RECOGNITION = "Emotion Recognition" +DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION = "Dialogue Context-to-Response Generation" +ABSA = "ABSA" +ABSA_TERM_OPINION_SENTIMENT = "ABSA: term opinion sentiment" +ABSA_TERM_CATEGORY_SENTIMENT = "ABSA: term category sentiment" +ABSA_TERM_SENTIMENT = "ABSA: term sentiment" +ABSA_CATEGORY_SENTIMENT = "ABSA: category sentiment" +CHARACTER_IDENTIFICATION = "Character Identification" +DIALOGUE_STATE_TRACKING = "Dialogue State Tracking" +DOCUMENT_GROUNDED_CONVERSATION = "Document Grounded Conversation" +TEXT2SQL = "Text2SQL" +SLOT_FILLING = "Slot Filling" +ROLE_RELATION_RECOGNITION = "Role Relation Recognition" +QUESTION_IN_CONTEXT_REWRITING = "Question in Context Rewriting" +NATURAL_LANGUAGE_INFERENCE = "Natural Language Inference" +MACHINE_READING_COMPREHENSION = "Machine Reading Comprehension" +MULTIPLE_CHOICE_QUESTION_ANSWERING = "Multiple Choice Question Answering" +INTENT_DETECTION = "Intent Detection" +DATA_TO_TEXT = "Data-to-Text" +CHIT_CHAT = "Chit-Chat" + +# Seq2Seq +MULTI_REF_SEP = "__multi_ref_sep__" +OPTION_LABEL = "option_label" +CANDIDATES = "candidates" + +# MENTION +MENTION = "mention" diff --git a/src/modules/preprocess/logger.py b/src/modules/preprocess/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..fff616b6f318d9defbb395d57de4e2bda04c96a5 --- /dev/null +++ b/src/modules/preprocess/logger.py @@ -0,0 +1,30 @@ +""" +Author: md +""" +import logging +import sys + + +def build_logger( + logger_name: str, + level: int, + log_file: str = None, + log_mode: str = "a", + formatter: str = "%(asctime)s | [%(name)s] | %(levelname)s | %(message)s", +): + logger = logging.getLogger(logger_name) + + logger.setLevel(level) + + logger.handlers.clear() + + formatter = logging.Formatter(formatter) + if log_file is not None: + handler = logging.FileHandler(log_file, log_mode) + else: + handler = logging.StreamHandler(sys.stdout) + + handler.setFormatter(formatter) + logger.addHandler(handler) + + return logger diff --git a/src/modules/preprocess/preprocess.py b/src/modules/preprocess/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..580b0f6eacc42fb78841bcf7ef71961e4139cd57 --- /dev/null +++ b/src/modules/preprocess/preprocess.py @@ -0,0 +1,343 @@ +from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor +from const import ( + DIALOGUE_SUMMARY, + EMOTION_RECOGNITION, + DIALOGUE_CONTEXT_TO_TEXT_GENERATION, + ABSA_TERM_OPINION_SENTIMENT, + ABSA_TERM_SENTIMENT, + ABSA_CATEGORY_SENTIMENT, + ABSA_TERM_CATEGORY_SENTIMENT, + 
CHARACTER_IDENTIFICATION, + DIALOGUE_STATE_TRACKING, + DOCUMENT_GROUNDED_CONVERSATION, + TEXT2SQL, + SLOT_FILLING, +) +from preprocessor.prompt_funcs import const_prompt_func_wrapper +from preprocessor.knowledge_funcs import ( + None_knowledge, + concat_list_knowledge_wrapper, + extract_turn_knowledge_wrapper, + origin_knowledge, + extract_schema_knowledge_wrapper, +) +from preprocessor.label_funs import ( + extract_summary, + extract_turn_emotion_wrapper, + extract_turn_utterance, + extract_aspects_wrapper, + rebuild_utterance_with_characters, + extract_belief_state_wrapper, + extract_sql, + extract_slots_without_intents_wrapper, +) +import os + +if __name__ == "__main__": + # 1. Dialogue Summary + TASK = DIALOGUE_SUMMARY + input_path = r"E:\research\processed\DialogueSummary" + output_path = r"E:\research\seq\DialogueSummary" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "Give a summary of this dialogue." + ), + knowledge_func=None_knowledge, + label_func=extract_summary, + ) + ) + + serial_proc.launch() + + # 2. Emotion Recognition + TASK = EMOTION_RECOGNITION + input_path = r"E:\research\processed\EmotionRecognition" + output_path = r"E:\research\seq\EmotionRecognition" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "With given possible emotions, select the correct answer." + ), + knowledge_func=concat_list_knowledge_wrapper( + "possible choices: ", " | " + ), + label_func=extract_turn_emotion_wrapper(", "), + ) + ) + + serial_proc.launch() + + # 3. Dialogue Context-to-Text Generation + TASK = DIALOGUE_CONTEXT_TO_TEXT_GENERATION + input_path = r"E:\research\processed\Dialogue-Context-to-Text Generation" + output_path = r"E:\research\seq\Dialogue-Context-to-Text Generation" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "With given dialogue context, give the response." + ), + knowledge_func=None_knowledge, + label_func=extract_turn_utterance, + roles_to_build_example=[["Listener"], ["third-person"]], + ) + ) + + serial_proc.launch() + + # 4. 
Aspect Sentiment Analysis + # 4.1 ABSA: term opinion sentiment + TASK = ABSA_TERM_OPINION_SENTIMENT + input_path = r"E:\research\processed\ABSA-term opinion sentiment\ASTE" + output_path = r"E:\research\seq\Aspect-based Sentiment Analysis\ASTE" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper("Give all the aspects."), + knowledge_func=None_knowledge, + label_func=extract_aspects_wrapper(" | ", ", "), + ) + ) + + serial_proc.launch() + + # 4.2 ABSA: term sentiment + TASK = ABSA_TERM_SENTIMENT + input_path = r"E:\research\processed\ABSA-term sentiment" + output_path = r"E:\research\seq\Aspect-based Sentiment Analysis" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper("Give all the aspects."), + knowledge_func=None_knowledge, + label_func=extract_aspects_wrapper(" | ", ", "), + ) + ) + + serial_proc.launch() + + # 4.3 ABSA: category sentiment + TASK = ABSA_CATEGORY_SENTIMENT + input_path = r"E:\research\processed\ABSA-category sentiment" + output_path = r"E:\research\seq\Aspect-based Sentiment Analysis" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper("Give all the aspects."), + knowledge_func=None_knowledge, + label_func=extract_aspects_wrapper(" | ", ", "), + ) + ) + + serial_proc.launch() + + # 4.4 ABSA: term category sentiment + TASK = ABSA_TERM_CATEGORY_SENTIMENT + input_path = r"E:\research\processed\ABSA-term category sentiment" + output_path = r"E:\research\seq\Aspect-based Sentiment Analysis" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper("Give all the aspects."), + knowledge_func=None_knowledge, + label_func=extract_aspects_wrapper(" | ", ", "), + ) + ) + + serial_proc.launch() + + # 5. Character Identification + TASK = CHARACTER_IDENTIFICATION + input_path = r"E:\research\processed\CharacterIdentification" + output_path = r"E:\research\seq\CharacterIdentification" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper("Generate with all characters."), + knowledge_func=concat_list_knowledge_wrapper("all speakers: ", " | "), + label_func=rebuild_utterance_with_characters, + ) + ) + + serial_proc.launch() + + + # 6. 
Dialogue State Tracking + TASK = DIALOGUE_STATE_TRACKING + input_path = r"E:\research\processed\DialogueStateTracking" + output_path = r"E:\research\seq\DialogueStateTracking" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "With given dialogue context, give the dialogue state." + ), + knowledge_func=None_knowledge, + label_func=extract_belief_state_wrapper(", ", " | ", "; ", ": "), + roles_to_build_example=[["USER"]], + ) + ) + + serial_proc.launch() + + # 7. Document Grounded Conversation + TASK = DOCUMENT_GROUNDED_CONVERSATION + input_path = r"E:\research\processed\DocumentGroundedConversations" + output_path = r"E:\research\seq\DocumentGroundedConversation" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "With given dialogue context, give the response." + ), + knowledge_func=origin_knowledge, + turn_knowledge_func=extract_turn_knowledge_wrapper(": ", " | ", "; "), + label_func=extract_turn_utterance, + ) + ) + + serial_proc.launch() + # 8. Text2SQL + TASK = TEXT2SQL + input_path = r"E:\research\processed\Text2SQL" + output_path = r"E:\research\seq\Text2SQL" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "With given dialogue context, give the sql." + ), + knowledge_func=origin_knowledge, + turn_knowledge_func=extract_schema_knowledge_wrapper(), + label_func=extract_sql, + ) + ) + + serial_proc.launch() + + TASK = SLOT_FILLING + input_path = r"E:\research\processed\SlotFilling\MultiDoGo" + output_path = r"E:\research\seq\SlotFilling\MultiDoGo" + + for dataset in os.listdir(input_path): + input_data_path = os.path.join(input_path, dataset) + output_data_path = os.path.join(output_path, dataset) + + serial_proc = SerialPreprocessor( + SerialConfig( + input_data_path, + output_data_path, + TASK, + logger_name=TASK, + task_bos_token=f"[{TASK}]", + prompt_func=const_prompt_func_wrapper( + "With given utterance, fill the slots." + ), + knowledge_func=None_knowledge, + label_func=extract_slots_without_intents_wrapper(", ", " | "), + ) + ) + + serial_proc.launch() diff --git a/src/modules/preprocess/preprocessor/SerialPreprocessor.py b/src/modules/preprocess/preprocessor/SerialPreprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..dcde828bb97479f864822c141cb8338f2a3728dc --- /dev/null +++ b/src/modules/preprocess/preprocessor/SerialPreprocessor.py @@ -0,0 +1,428 @@ +""" +Several preprocessor classes. 
+Author: md +""" + +from preprocessor.base import BasePreprocessorConfig, BasePreprocessor +from const import ( + DIALOGUE_SUMMARY, + DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION, + DIALOG, + KNOWLEDGE, + UTTERANCE, + ROLES, + EMOTION_RECOGNITION, + VALUE, + ABSA, + CHARACTER_IDENTIFICATION, + DIALOGUE_STATE_TRACKING, + DOCUMENT_GROUNDED_CONVERSATION, + TEXT2SQL, + SLOT_FILLING, + ROLE_RELATION_RECOGNITION, + QUESTION_IN_CONTEXT_REWRITING, + NATURAL_LANGUAGE_INFERENCE, + MACHINE_READING_COMPREHENSION, + MULTIPLE_CHOICE_QUESTION_ANSWERING, + INTENT_DETECTION, + DATA_TO_TEXT, + CHIT_CHAT, + TRAIN_SPLIT, +) +from typing import Dict, List, Callable +from copy import deepcopy + + +class SerialConfig(BasePreprocessorConfig): + def __init__( + self, + input_dir: str, + output_dir: str, + task: str, + task_bos_token: str = "", + knowledge_bos_token: str = "[EK]", + prompt_bos_token: str = "[C]", + use_role: bool = True, + turn_sep: str = None, + roles_to_build_example: List = None, + dev_and_test_roles_to_build_example: List = None, + prompt_func: Callable = None, + knowledge_func: Callable = None, + label_func: Callable = None, + turn_knowledge_func: Callable = None, + roles_in_history: List[List] = None, + cur_turn_process_func: Callable = None, + all_turns_process_func: Callable = None, + multi_ref_sep: str = None, + *args, + **kwargs, + ) -> None: + super().__init__(input_dir, output_dir, task, *args, **kwargs) + + self.use_role = use_role + self.turn_sep = turn_sep + self.roles_to_build_example = roles_to_build_example + self.prompt_func = prompt_func + self.task_bos_token = task_bos_token + self.knowledge_bos_token = knowledge_bos_token + self.prompt_bos_token = prompt_bos_token + self.knowledge_func = knowledge_func + self.label_func = label_func + self.turn_knowledge_func = turn_knowledge_func + self.roles_in_history = roles_in_history + self.multi_ref_sep = multi_ref_sep + self.dev_and_test_roles_to_build_example = dev_and_test_roles_to_build_example + self.cur_turn_process_func = cur_turn_process_func + self.all_turns_process_func = all_turns_process_func + + +def concat_roles(roles): + return ", ".join(roles) + + +def concat_dial_history(config: SerialConfig, history: List[Dict]): + # utterance_list = [ + # f"{concat_roles(turn[ROLES])}: {turn[UTTERANCE].strip()}" + # if config.use_role + # else turn[UTTERANCE].strip() + # for turn in history + # ] + + utterance_list = [] + for turn in history: + if ( + config.roles_in_history is not None + and turn[ROLES] not in config.roles_in_history + ): + continue + + if config.use_role: + utterance_list.append( + f"{concat_roles(turn[ROLES])}: {turn[UTTERANCE].strip()}" + ) + else: + utterance_list.append(turn[UTTERANCE].strip()) + + if not utterance_list: + return "None" + + turn_sep = " " + if config.turn_sep is not None: + turn_sep = f" {config.turn_sep} " + + return turn_sep.join(utterance_list) + + +def concat_history_knowledge_prompt( + config: SerialConfig, history: str, knowledge: str = "", prompt: str = "" +): + """Concat `history`, `knowledge` and `prompt`. + + NOTE: the order is fixed now. 
+ """ + text = "" + + if config.task_bos_token is not None: + text = f"{config.task_bos_token} " + + text += history + + if knowledge is not None: + text += f" {config.knowledge_bos_token} {knowledge}" + + if prompt is not None: + text += f" {config.prompt_bos_token} {prompt}" + + return text + + +def clean(text): + return text.replace("\r\n", " ").replace("\n", " ").replace("\r", " ") + + +def add_prefix_to_label(prefix, split, label): + tgt = f"{prefix} {label}" if split == "train" else label + return tgt + + +class SerialPreprocessor(BasePreprocessor): + def __init__(self, config: SerialConfig) -> None: + super().__init__(config) + + def extract_knowledge(self, example: Dict): + if self.config.knowledge_func is None: + knowledge = None + + elif ( + KNOWLEDGE not in example + or not self.config.knowledge_func.__code__.co_argcount + ): + knowledge = self.config.knowledge_func() + else: + knowledge = self.config.knowledge_func(example[KNOWLEDGE][VALUE]) + + return knowledge + + def preprocess_for_dialogue_level(self, split: str, example: Dict, knowledge: str): + label = self.config.label_func(example) + tgt = add_prefix_to_label(self.config.task_bos_token, split, label) + + history = concat_dial_history(self.config, example[DIALOG]) + + if self.config.prompt_func is None: + prompt = "" + elif not self.config.prompt_func.__code__.co_argcount: + prompt = self.config.prompt_func() + + src = concat_history_knowledge_prompt(self.config, history, knowledge, prompt) + + return [{"src": clean(src), "tgt": clean(tgt)}] + + def preprocess_for_label_level(self, split: str, example: Dict, knowledge: str): + label_generator = self.config.label_func(example) + + examples = [] + for turn_id, label, extra_args in label_generator: + tgt = add_prefix_to_label(self.config.task_bos_token, split, label) + + hist = deepcopy(example[DIALOG]) + if self.config.all_turns_process_func is not None: + hist[turn_id] = self.config.all_turns_process_func( + hist[turn_id], *extra_args + ) + + history = concat_dial_history(self.config, hist) + + if self.config.prompt_func is None: + prompt = "" + elif not self.config.prompt_func.__code__.co_argcount: + prompt = self.config.prompt_func() + + src = concat_history_knowledge_prompt( + self.config, history, knowledge, prompt + ) + + examples.append({"src": clean(src), "tgt": clean(tgt)}) + + return examples + + def get_label( + self, turn, include_current_turn, turn_idx, split, origin_knowledge=None + ): + # skip the roles not requiring to build examples + if ( + split != TRAIN_SPLIT + and self.config.dev_and_test_roles_to_build_example is not None + ): + roles_to_build_example = self.config.dev_and_test_roles_to_build_example + else: + roles_to_build_example = self.config.roles_to_build_example + if ( + roles_to_build_example is not None + and turn[ROLES] not in roles_to_build_example + ): + return None + + # skip the first turn if not including current turn + if not include_current_turn and turn_idx == 0: + return None + + if self.config.task != DIALOGUE_STATE_TRACKING: + try: + label = self.config.label_func(turn, split=split) + except: + label = self.config.label_func(turn, origin_knowledge, split=split) + else: + label = self.config.label_func( + turn, self.ontologies[split], do_train=(split == TRAIN_SPLIT) + ) + + return label + + def preprocess_for_turn_level( + self, + split: str, + example: Dict, + knowledge: str, + include_current_turn=False, + origin_knowledge=None, + ): + examples = [] + multiref = [] + for turn_idx, turn in enumerate(example[DIALOG]): + label = 
self.get_label( + turn, include_current_turn, turn_idx, split, origin_knowledge + ) + + if label is None: + continue + + multiref.append(label) + # requre to merge and arrive at the final consecutive label + if ( + self.config.multi_ref_sep is not None + and split != "train" + and turn_idx < len(example[DIALOG]) - 1 + and self.get_label( + example[DIALOG][turn_idx + 1], + include_current_turn, + turn_idx + 1, + split, + ) + is not None + ): + continue + + if self.config.multi_ref_sep is not None and split != "train": + label = self.config.multi_ref_sep.join(multiref) + + tgt = add_prefix_to_label(self.config.task_bos_token, split, label) + + end = (turn_idx + 1) if include_current_turn else turn_idx + + hist = deepcopy(example[DIALOG][:end]) + if self.config.cur_turn_process_func is not None: + hist[-1] = self.config.cur_turn_process_func(hist[-1]) + + history = concat_dial_history(self.config, hist) + + if self.config.prompt_func is None: + prompt = "" + elif not self.config.prompt_func.__code__.co_argcount: + prompt = self.config.prompt_func() + + if self.config.turn_knowledge_func is not None: + knowledge_to_use = self.config.turn_knowledge_func(knowledge, turn) + else: + knowledge_to_use = knowledge + + src = concat_history_knowledge_prompt( + self.config, history, knowledge_to_use, prompt + ) + + examples.append({"src": clean(src), "tgt": clean(tgt)}) + + multiref = [] + + return examples + + def preprocess_line(self, split: str, example: Dict) -> List[Dict]: + knowledge = self.extract_knowledge(example) + + # 1. Dialogue Summary + if self.config.task == DIALOGUE_SUMMARY: + return self.preprocess_for_dialogue_level(split, example, knowledge) + + # 2. Emotion Recognition + if self.config.task == EMOTION_RECOGNITION: + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=True + ) + + # 3. Dialogue Context-to-Text Generation + if self.config.task == DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION: + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=False + ) + + # 4. ABSA + if self.config.task.startswith(ABSA): + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=True + ) + + # 5. Character Identification + if self.config.task == CHARACTER_IDENTIFICATION: + # return self.preprocess_for_turn_level( + # split, example, knowledge, include_current_turn=True + # ) + # return self.preprocess_for_dialogue_level(split, example, knowledge) + return self.preprocess_for_label_level(split, example, knowledge) + + # 6. Dialogue State Tracking + if self.config.task == DIALOGUE_STATE_TRACKING: + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=True + ) + + # 7. Document Grounded Conversation + if self.config.task == DOCUMENT_GROUNDED_CONVERSATION: + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=False + ) + + # 8. Text2SQL + if self.config.task == TEXT2SQL: + seq_examples = self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=True + ) + + for idx in range(len(seq_examples)): + seq_examples[idx]["db_id"] = knowledge["db_id"] + + return seq_examples + + # 9. Slot Filling + if self.config.task == SLOT_FILLING: + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=True + ) + + # 10. Relation Recognition + if self.config.task == ROLE_RELATION_RECOGNITION: + return self.preprocess_for_dialogue_level(split, example, knowledge) + + # 11. 
Question in Context Rewriting + if self.config.task == QUESTION_IN_CONTEXT_REWRITING: + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=True + ) + + # 12. Natural Language Inference + if self.config.task == NATURAL_LANGUAGE_INFERENCE: + return self.preprocess_for_turn_level( + split, + example, + knowledge, + include_current_turn=True, + origin_knowledge=example[KNOWLEDGE][VALUE], + ) + + # 13. Machine Reading Comprehension + if self.config.task == MACHINE_READING_COMPREHENSION: + return self.preprocess_for_turn_level(split, example, knowledge) + + # 14. Multiple Choice Question Answering + if self.config.task == MULTIPLE_CHOICE_QUESTION_ANSWERING: + return self.preprocess_for_turn_level( + split, + example, + knowledge, + include_current_turn=True, + origin_knowledge=example[KNOWLEDGE][VALUE], + ) + + # 15. Intent Detection + if self.config.task == INTENT_DETECTION: + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=True + ) + + # 16. Data-to-Text + if self.config.task == DATA_TO_TEXT: + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=True + ) + + # 17. Chit-Chat + if self.config.task == CHIT_CHAT: + return self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=False + ) + + if self.config.task == "Semantic Parsing": + seq_examples = self.preprocess_for_turn_level( + split, example, knowledge, include_current_turn=True + ) + + return seq_examples diff --git a/src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-312.pyc b/src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9b3095258894cdbba463a3273dc6b9193009719 Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-312.pyc differ diff --git a/src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-38.pyc b/src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f65ccefec7069f424a8ae41b79ea17f95fa9fc51 Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-38.pyc differ diff --git a/src/modules/preprocess/preprocessor/__pycache__/base.cpython-312.pyc b/src/modules/preprocess/preprocessor/__pycache__/base.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72de69f30b2d95d5c8c944799dd5e84bbc57762b Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/base.cpython-312.pyc differ diff --git a/src/modules/preprocess/preprocessor/__pycache__/base.cpython-38.pyc b/src/modules/preprocess/preprocessor/__pycache__/base.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb83bfa2e65c3787639788de19ddc08e0b29a145 Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/base.cpython-38.pyc differ diff --git a/src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-312.pyc b/src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10cb919f44d26925de3697e569e95e0aaf31d802 Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-312.pyc differ diff --git 
a/src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-38.pyc b/src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab8202bf4edbf848fb2033344debe6d30dc0a459 Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-38.pyc differ diff --git a/src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-312.pyc b/src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc28ba6f7ad4add66c625cd0602c93150cc851a1 Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-312.pyc differ diff --git a/src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-38.pyc b/src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48184db1e83dc3b53698bc9acbf93536efc6f54e Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-38.pyc differ diff --git a/src/modules/preprocess/preprocessor/__pycache__/process_turn_funcs.cpython-38.pyc b/src/modules/preprocess/preprocessor/__pycache__/process_turn_funcs.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df5db3e47a1f7bbdc5cc94cc24817293ac2147a4 Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/process_turn_funcs.cpython-38.pyc differ diff --git a/src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-312.pyc b/src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efac434da41b6ec3937a3305ef71b141427ef43d Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-312.pyc differ diff --git a/src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-38.pyc b/src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05cc70b162bdec5a46fa87b6d645bd4ddab20cca Binary files /dev/null and b/src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-38.pyc differ diff --git a/src/modules/preprocess/preprocessor/base.py b/src/modules/preprocess/preprocessor/base.py new file mode 100644 index 0000000000000000000000000000000000000000..4aacdf56ed2e1763191f51ce5308169dc18e34ec --- /dev/null +++ b/src/modules/preprocess/preprocessor/base.py @@ -0,0 +1,150 @@ +""" +Base preprocessor class. 
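+Reads dialogues in the unified .jsonl format from `input_dir` and, via the subclass's `preprocess_line`, writes parallel `{split}.src` and `{split}.tgt` (or `{split}.gold` for dev/test) files to `output_dir`.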
+Author: md +""" + +from config import BaseConfig +import os +from logger import build_logger +import logging +import json +from const import TRAIN_SPLIT, DEV_SPLIT, TEST_SPLIT, DIALOGUE_STATE_TRACKING +from typing import Dict +import shutil + + +class BasePreprocessorConfig(BaseConfig): + def __init__( + self, + input_dir: str, + output_dir: str, + task: str, + formatter="%(asctime)s | [%(name)s] | %(levelname)s | %(message)s", + *args, + **kwargs, + ) -> None: + super().__init__(*args, **kwargs) + + self.input_dir = input_dir + self.output_dir = output_dir + self.task = task + self.formatter = formatter + + +class BasePreprocessor(object): + def __init__(self, config: BasePreprocessorConfig) -> None: + self.config = config + self.logger = build_logger( + config.logger_name, + logging.INFO, + config.log_file, + config.log_mode, + config.formatter, + ) + + if self.config.task == DIALOGUE_STATE_TRACKING: + self.ontologies = { + split: self.load_ontology(split) + for split in [TRAIN_SPLIT, DEV_SPLIT, TEST_SPLIT] + } + + def load_ontology(self, split: str) -> Dict: + """ + Load the ontology file. + """ + ontology_file = os.path.join(self.config.input_dir, f"{split}_ontology.json") + if not os.path.exists(ontology_file): + return None + return json.load(open(ontology_file, "r", encoding="utf8")) + + def preprocess_line(self, split: str, example: Dict) -> Dict: + """ + Every preprocessor should customize this function for all `train`, `dev` and `test` split. + """ + raise NotImplementedError("The preprocess line procedure is required!") + + def _preprocess_file( + self, start, infile, src_writer, tgt_writer, split, encoding="UTF-8" + ): + with open(infile, "r", encoding=encoding) as reader: + for line in reader: + if line.strip(): + example = json.loads(line) + if start: + start = False + elif split != "train": + tgt_writer.write("\n") + for processed_example in self.preprocess_line(split, example): + src_writer.write(f"{processed_example['src']}\n") + tgt_writer.write(f"{processed_example['tgt']}") + + if "db_id" in processed_example and split != "train": + tgt_writer.write(f"\t{processed_example['db_id']}") + tgt_writer.write("\n") + return start + + def preprocess(self, split: str) -> bool: + if not os.path.exists(self.config.output_dir): + os.makedirs(self.config.output_dir) + + src_file = os.path.join(self.config.output_dir, f"{split}.src") + tgt_file = os.path.join( + self.config.output_dir, + f"{split}.tgt" if split == "train" else f"{split}.gold", + ) + exist = False + + with open(src_file, "w") as src_writer, open(tgt_file, "w") as tgt_writer: + start = True + for filename in os.listdir(self.config.input_dir): + if split not in filename or not filename.endswith(".jsonl"): + continue + + exist = True + infile = os.path.join(self.config.input_dir, filename) + + self.logger.info(f"preprocessing {infile}") + try: + start = self._preprocess_file( + start, infile, src_writer, tgt_writer, split + ) + except UnicodeDecodeError: + start = self._preprocess_file( + start, infile, src_writer, tgt_writer, split, "ISO-8859-1" + ) + + return exist + + def launch(self) -> None: + self.logger.info(f"Start to preprocess: {TRAIN_SPLIT}") + train = self.preprocess(TRAIN_SPLIT) + assert train + + self.logger.info(f"Start to preprocess: {DEV_SPLIT}") + dev = self.preprocess(DEV_SPLIT) + self.logger.info(f"Start to preprocess: {TEST_SPLIT}") + test = self.preprocess(TEST_SPLIT) + + if dev and not test: + self.logger.info("Copy dev to test") + shutil.copyfile( + os.path.join(self.config.output_dir, 
"dev.src"), + os.path.join(self.config.output_dir, "test.src"), + ) + shutil.copyfile( + os.path.join(self.config.output_dir, "dev.gold"), + os.path.join(self.config.output_dir, "test.gold"), + ) + + if test and not dev: + self.logger.info("Copy test to dev") + shutil.copyfile( + os.path.join(self.config.output_dir, "test.src"), + os.path.join(self.config.output_dir, "dev.src"), + ) + shutil.copyfile( + os.path.join(self.config.output_dir, "test.gold"), + os.path.join(self.config.output_dir, "dev.gold"), + ) + + self.logger.info("Preprocess successfully!") diff --git a/src/modules/preprocess/preprocessor/knowledge_funcs.py b/src/modules/preprocess/preprocessor/knowledge_funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..22526c70dc10a45f75345ebeaab23e2e0a943dd7 --- /dev/null +++ b/src/modules/preprocess/preprocessor/knowledge_funcs.py @@ -0,0 +1,505 @@ +from typing import List, Dict, Optional, Tuple +import random +import difflib +from rapidfuzz import fuzz +import sqlite3 +import functools +from const import KNOWLEDGE_TO_SELECT, UTTERANCE, ROLES, BELIEF_STATE, DOMAIN + + +def None_knowledge(): + return "None" + + +def concat_list_knowledge_wrapper(prompt: str = "", sep: str = " | "): + def get_list_knowledge(str_list: List[str]): + return prompt + sep.join(str_list) + + return get_list_knowledge + + +def origin_knowledge(knowledge): + return knowledge + + +def extract_turn_knowledge( + knowledge, section_prompt_op, section_sep, section_value_sep +): + if isinstance(knowledge, dict): + sec_list = [] + for section in sorted(knowledge.keys()): + sec_str = f"{section}{section_prompt_op}" + if isinstance(knowledge[section], str): + sec_str += knowledge[section] + elif isinstance(knowledge[section], list): + sec_str += section_value_sep.join(knowledge[section]) + sec_list.append(sec_str) + + return section_sep.join(sec_list) + + elif isinstance(knowledge, str): + return knowledge + + elif isinstance(knowledge, list): + return ";; ".join( + [ + extract_turn_knowledge( + sec, section_prompt_op, section_sep, section_value_sep + ) + for sec in knowledge + ] + ) + + +def extract_turn_domains_wrapper(prompt: str = "", sep: str = ", "): + def extract_turn_domains(knowledge, turn): + bs = turn[BELIEF_STATE] + domains = [] + for state in bs: + domain = state[DOMAIN] + if domain not in domains: + domains.append(domain) + + return prompt + sep.join(domains) + + return extract_turn_domains + + +def extract_turn_knowledge_wrapper(section_prompt_op, section_sep, section_value_sep): + def extract_turn_knowledge_func(knowledge, turn): + return extract_turn_knowledge( + [knowledge[sec] for sec in turn[KNOWLEDGE_TO_SELECT]], + section_prompt_op, + section_sep, + section_value_sep, + ) + + return extract_turn_knowledge_func + + +# Text2SQL +EXIST = {"atis", "geo", "advising", "yelp", "restaurants", "imdb", "academic"} + +# fmt: off +_stopwords = {'who', 'ourselves', 'down', 'only', 'were', 'him', 'at', "weren't", 'has', 'few', "it's", 'm', 'again', + 'd', 'haven', 'been', 'other', 'we', 'an', 'own', 'doing', 'ma', 'hers', 'all', "haven't", 'in', 'but', + "shouldn't", 'does', 'out', 'aren', 'you', "you'd", 'himself', "isn't", 'most', 'y', 'below', 'is', + "wasn't", 'hasn', 'them', 'wouldn', 'against', 'this', 'about', 'there', 'don', "that'll", 'a', 'being', + 'with', 'your', 'theirs', 'its', 'any', 'why', 'now', 'during', 'weren', 'if', 'should', 'those', 'be', + 'they', 'o', 't', 'of', 'or', 'me', 'i', 'some', 'her', 'do', 'will', 'yours', 'for', 'mightn', 'nor', + 'needn', 'the', 
'until', "couldn't", 'he', 'which', 'yourself', 'to', "needn't", "you're", 'because', + 'their', 'where', 'it', "didn't", 've', 'whom', "should've", 'can', "shan't", 'on', 'had', 'have', + 'myself', 'am', "don't", 'under', 'was', "won't", 'these', 'so', 'as', 'after', 'above', 'each', 'ours', + 'hadn', 'having', 'wasn', 's', 'doesn', "hadn't", 'than', 'by', 'that', 'both', 'herself', 'his', + "wouldn't", 'into', "doesn't", 'before', 'my', 'won', 'more', 'are', 'through', 'same', 'how', 'what', + 'over', 'll', 'yourselves', 'up', 'mustn', "mustn't", "she's", 're', 'such', 'didn', "you'll", 'shan', + 'when', "you've", 'themselves', "mightn't", 'she', 'from', 'isn', 'ain', 'between', 'once', 'here', + 'shouldn', 'our', 'and', 'not', 'too', 'very', 'further', 'while', 'off', 'couldn', "hasn't", 'itself', + 'then', 'did', 'just', "aren't"} +# fmt: on + +_commonwords = {"no", "yes", "many"} + + +def is_number(s: str) -> bool: + try: + float(s.replace(",", "")) + return True + except: + return False + + +def is_stopword(s: str) -> bool: + return s.strip() in _stopwords + + +def is_commonword(s: str) -> bool: + return s.strip() in _commonwords + + +def is_common_db_term(s: str) -> bool: + return s.strip() in ["id"] + + +class Match(object): + def __init__(self, start: int, size: int) -> None: + self.start = start + self.size = size + + +def is_span_separator(c: str) -> bool: + return c in "'\"()`,.?! " + + +def split(s: str) -> List[str]: + return [c.lower() for c in s.strip()] + + +def prefix_match(s1: str, s2: str) -> bool: + i, j = 0, 0 + for i in range(len(s1)): + if not is_span_separator(s1[i]): + break + for j in range(len(s2)): + if not is_span_separator(s2[j]): + break + if i < len(s1) and j < len(s2): + return s1[i] == s2[j] + elif i >= len(s1) and j >= len(s2): + return True + else: + return False + + +def get_effective_match_source(s: str, start: int, end: int) -> Match: + _start = -1 + + for i in range(start, start - 2, -1): + if i < 0: + _start = i + 1 + break + if is_span_separator(s[i]): + _start = i + break + + if _start < 0: + return None + + _end = -1 + for i in range(end - 1, end + 3): + if i >= len(s): + _end = i - 1 + break + if is_span_separator(s[i]): + _end = i + break + + if _end < 0: + return None + + while _start < len(s) and is_span_separator(s[_start]): + _start += 1 + while _end >= 0 and is_span_separator(s[_end]): + _end -= 1 + + return Match(_start, _end - _start + 1) + + +def get_matched_entries( + s: str, field_values: List[str], m_theta: float = 0.85, s_theta: float = 0.85 +) -> Optional[List[Tuple[str, Tuple[str, str, float, float, int]]]]: + if not field_values: + return None + + if isinstance(s, str): + n_grams = split(s) + else: + n_grams = s + + matched = dict() + for field_value in field_values: + if not isinstance(field_value, str): + continue + fv_tokens = split(field_value) + sm = difflib.SequenceMatcher(None, n_grams, fv_tokens) + match = sm.find_longest_match(0, len(n_grams), 0, len(fv_tokens)) + if match.size > 0: + source_match = get_effective_match_source( + n_grams, match.a, match.a + match.size + ) + if source_match and source_match.size > 1: + match_str = field_value[match.b : match.b + match.size] + source_match_str = s[ + source_match.start : source_match.start + source_match.size + ] + c_match_str = match_str.lower().strip() + c_source_match_str = source_match_str.lower().strip() + c_field_value = field_value.lower().strip() + if ( + c_match_str + and not is_number(c_match_str) + and not is_common_db_term(c_match_str) + ): + if ( + 
is_stopword(c_match_str) + or is_stopword(c_source_match_str) + or is_stopword(c_field_value) + ): + continue + if c_source_match_str.endswith(c_match_str + "'s"): + match_score = 1.0 + else: + if prefix_match(c_field_value, c_source_match_str): + match_score = ( + fuzz.ratio(c_field_value, c_source_match_str) / 100 + ) + else: + match_score = 0 + if ( + is_commonword(c_match_str) + or is_commonword(c_source_match_str) + or is_commonword(c_field_value) + ) and match_score < 1: + continue + s_match_score = match_score + if match_score >= m_theta and s_match_score >= s_theta: + if field_value.isupper() and match_score * s_match_score < 1: + continue + matched[match_str] = ( + field_value, + source_match_str, + match_score, + s_match_score, + match.size, + ) + + if not matched: + return None + else: + return sorted( + matched.items(), + key=lambda x: (1e16 * x[1][2] + 1e8 * x[1][3] + x[1][4]), + reverse=True, + ) + + +@functools.lru_cache(maxsize=1000, typed=False) +def get_column_picklist(table_name: str, column_name: str, db_path: str) -> list: + fetch_sql = "SELECT DISTINCT `{}` FROM `{}`".format(column_name, table_name) + try: + conn = sqlite3.connect(db_path) + conn.text_factory = bytes + c = conn.cursor() + c.execute(fetch_sql) + picklist = set() + for x in c.fetchall(): + if isinstance(x[0], str): + picklist.add(x[0].encode("utf-8")) + elif isinstance(x[0], bytes): + try: + picklist.add(x[0].decode("utf-8")) + except UnicodeDecodeError: + picklist.add(x[0].decode("latin-1")) + else: + picklist.add(x[0]) + picklist = list(picklist) + finally: + conn.close() + return picklist + + +def get_database_matches( + question: str, + table_name: str, + column_name: str, + db_path: str, + top_k_matches: int = 2, + match_threshold: float = 0.85, +) -> List[str]: + picklist = get_column_picklist( + table_name=table_name, column_name=column_name, db_path=db_path + ) + matches = [] + if picklist and isinstance(picklist[0], str): + matched_entries = get_matched_entries( + s=question, + field_values=picklist, + m_theta=match_threshold, + s_theta=match_threshold, + ) + if matched_entries: + num_values_inserted = 0 + for _match_str, ( + field_value, + _s_match_str, + match_score, + s_match_score, + _match_size, + ) in matched_entries: + if "name" in column_name and match_score * s_match_score < 1: + continue + if table_name != "sqlite_sequence": # Spider database artifact + matches.append(field_value) + num_values_inserted += 1 + if num_values_inserted >= top_k_matches: + break + return matches + + +def serialize_schema( + question: str, + db_path: str, + db_id: str, + db_column_names: Dict[str, str], + db_table_names: List[str], + schema_serialization_type: str = "peteshaw", + schema_serialization_randomized: bool = False, + schema_serialization_with_db_id: bool = True, + schema_serialization_with_db_content: bool = False, + normalize_query: bool = True, +) -> str: + if schema_serialization_type == "verbose": + db_id_str = "Database: {db_id}. " + table_sep = ". " + table_str = "Table: {table}. 
Columns: {columns}" + column_sep = ", " + column_str_with_values = "{column} ({values})" + column_str_without_values = "{column}" + value_sep = ", " + elif schema_serialization_type == "peteshaw": + # see https://github.com/google-research/language/blob/master/language/nqg/tasks/spider/append_schema.py#L42 + db_id_str = "{db_id}" + table_sep = "" + table_str = " | {table} : {columns}" + column_sep = " , " + column_str_with_values = "{column} ( {values} )" + column_str_without_values = "{column}" + value_sep = " , " + else: + raise NotImplementedError + + def get_column_str(table_name: str, column_name: str) -> str: + column_name_str = column_name.lower() if normalize_query else column_name + if schema_serialization_with_db_content: + matches = get_database_matches( + question=question, + table_name=table_name, + column_name=column_name, + db_path=(db_path + "/" + db_id + "/" + db_id + ".sqlite"), + ) + if matches: + return column_str_with_values.format( + column=column_name_str, values=value_sep.join(matches) + ) + else: + return column_str_without_values.format(column=column_name_str) + else: + return column_str_without_values.format(column=column_name_str) + + tables = [ + table_str.format( + table=table_name.lower() if normalize_query else table_name, + columns=column_sep.join( + map( + lambda y: get_column_str(table_name=table_name, column_name=y[1]), + filter( + lambda y: y[0] == table_id, + zip( + db_column_names["table_id"], + db_column_names["column_name"], + ), + ), + ) + ), + ) + for table_id, table_name in enumerate(db_table_names) + ] + if schema_serialization_randomized: + random.shuffle(tables) + if schema_serialization_with_db_id: + serialized_schema = db_id_str.format(db_id=db_id) + table_sep.join(tables) + else: + serialized_schema = table_sep.join(tables) + return serialized_schema + + +def extract_schema_knowledge_wrapper( + schema_serialization_type: str = "peteshaw", + schema_serialization_randomized: bool = False, + schema_serialization_with_db_id: bool = True, + schema_serialization_with_db_content: bool = False, + normalize_query: bool = True, +): + def extract_turn_schema_knowledge_func(knowledge, turn): + schema = knowledge["schema"] + db_column_names = { + "table_id": [table_id for table_id, _ in schema["column_names_original"]], + "column_name": [ + column_name for _, column_name in schema["column_names_original"] + ], + } + return serialize_schema( + turn[UTTERANCE], + knowledge["db_path"], + knowledge["db_id"], + db_column_names, + schema["table_names_original"], + schema_serialization_type, + schema_serialization_randomized, + schema_serialization_with_db_id, + schema_serialization_with_db_content, + normalize_query, + ) + + return extract_turn_schema_knowledge_func + + +def extract_dict_knowledge(knowledge, key_prompt_op, pair_sep): + pair_list = [] + for key in knowledge: + pair_list.append(f"{key}{key_prompt_op}{knowledge[key]}") + + if not pair_list: + return "None" + + return pair_sep.join(pair_list) + + +def extract_dict_knowledge_wrapper(key_prompt_op, pair_sep): + def extract_dict_knowledge_func(knowledge): + return extract_dict_knowledge(knowledge, key_prompt_op, pair_sep) + + return extract_dict_knowledge_func + + +def extract_dialogue_knowledge(knowledge, key_prompt_op, pair_sep, role_sep): + pair_list = [] + for key in knowledge: + if isinstance(knowledge[key], str): + pair_list.append(f"{key}{key_prompt_op}{knowledge[key]}") + elif isinstance(knowledge[key], list): + turns = [] + for turn in knowledge[key]: + role_str = role_sep.join(turn[ROLES]) 
+ turns.append(f"{role_str}# {turn[UTTERANCE]}") + dial_str = " ".join(turns) + pair_list.append(f"{key}{key_prompt_op}{dial_str}") + if not pair_list: + return "None" + return pair_sep.join(pair_list) + + +def extract_dialogue_knowledge_wrapper(key_prompt_op, pair_sep, role_sep): + def extract_dialogue_knowledge_func(knowledge): + return extract_dialogue_knowledge(knowledge, key_prompt_op, pair_sep, role_sep) + + return extract_dialogue_knowledge_func + + +def extract_kg_knowledge( + knowledge, key_prompt_op, pair_sep, intra_edge_sep, inner_edge_sep +): + pair_list = [] + for key in knowledge: + if isinstance(knowledge[key], str): + pair_list.append(f"{key}{key_prompt_op}{knowledge[key]}") + elif isinstance(knowledge[key], list): + edges = [] + for edge in knowledge[key]: + edges.append(inner_edge_sep.join(edge)) + kg_str = intra_edge_sep.join(edges) + pair_list.append(f"{key}{key_prompt_op}{kg_str}") + if not pair_list: + return "None" + return pair_sep.join(pair_list) + + +def extract_kg_knowledge_wrapper( + key_prompt_op, pair_sep, intra_edge_sep, inner_edge_sep +): + def extract_kg_knowledge_func(knowledge): + return extract_kg_knowledge( + knowledge, key_prompt_op, pair_sep, intra_edge_sep, inner_edge_sep + ) + + return extract_kg_knowledge_func diff --git a/src/modules/preprocess/preprocessor/label_funs.py b/src/modules/preprocess/preprocessor/label_funs.py new file mode 100644 index 0000000000000000000000000000000000000000..f2ddcdb1a1e70cf65d04bef528efb4b8815cb66f --- /dev/null +++ b/src/modules/preprocess/preprocessor/label_funs.py @@ -0,0 +1,324 @@ +from const import ( + SUMMARY, + EMOTIONS, + EMOTION, + UTTERANCE, + ASPECTS, + TARGET, + VALUE, + OPINION, + SENTIMENT, + CATEGORY, + CHARACTERS, + DIALOG, + START, + END, + BELIEF_STATE, + DOMAIN, + INFORMED_SLOT_VALUE_TABLE, + SLOT, + VALUES, + RELATION, + SQL, + SLOT_VALUE_TABLE, + SLOTS_TO_FILL, + ROLE_RELATIONS, + REWRITTEN, + ROLES_TO_SELECT, + ACTIVE_INTENTS, + TRAIN_SPLIT, + OPTION_LABEL, + CANDIDATES, +) +from typing import Dict +import re +import random +import copy +import json + + +def extract_summary(dial: Dict, **kwargs): + """ + `dial` is the full dialog. 
+ """ + return dial[SUMMARY] + + +def extract_turn_emotion(turn: Dict, sep: str, **kwargs): + if EMOTIONS not in turn: + return None + return sep.join(map(lambda x: x[EMOTION], turn[EMOTIONS])) + + +def extract_turn_emotion_wrapper(sep: str): + def extract_turn_emotion_func(turn: Dict, **kwargs): + return extract_turn_emotion(turn, sep) + + return extract_turn_emotion_func + + +def extract_turn_utterance(turn: Dict, **kwargs): + return turn[UTTERANCE] + + +def extract_aspects(turn: Dict, ext_aspect_sep: str, int_aspect_sep: str): + if not turn[ASPECTS]: + return "None" + + aspects = turn[ASPECTS] + + tgt_seq = [] + for aspect in aspects: + aspect_seq = [] + if TARGET in aspect: + aspect_seq.append(aspect[TARGET][VALUE]) + if CATEGORY in aspect: + aspect_seq.append(aspect[CATEGORY]) + + if OPINION in aspect: + aspect_seq.append(aspect[OPINION][VALUE]) + + if SENTIMENT in aspect: + aspect_seq.append(aspect[SENTIMENT]) + + tgt_seq.append(int_aspect_sep.join(aspect_seq)) + + return ext_aspect_sep.join(tgt_seq) + + +def extract_aspects_wrapper(ext_aspect_sep: str, int_aspect_sep: str): + def extract_aspects_func(turn: Dict, **kwargs): + return extract_aspects(turn, ext_aspect_sep, int_aspect_sep) + + return extract_aspects_func + + +def rebuild_utterance_with_characters(turn: Dict, split): + if split == "train": + utterance = turn[UTTERANCE] + parts = [] + pre = 0 + + for character in turn[CHARACTERS]: + parts.append(utterance[pre : character[START]]) + parts.append( + f"[{utterance[character[START]: character[END]]} | {character[VALUE]}]" + ) + pre = character[END] + + parts.append(utterance[pre:]) + return "".join(parts) + + else: + tuples = [] + for character in turn[CHARACTERS]: + tuples.append(f"{character[VALUE]}, {character[START]}, {character[END]}") + + if not tuples: + return "None" + return " | ".join(tuples) + + +def extract_characters(example): + for turn_id, turn in enumerate(example[DIALOG]): + if CHARACTERS not in turn: + continue + + for character in turn[CHARACTERS]: + yield turn_id, character[VALUE], (character[END],) + + +def extract_belief_state( + turn, + value_sep, + domain_sep, + slot_sep, + domain_prompt_op, + ontology=None, + do_train=True, +): + domain_bs = dict() + bs = turn[BELIEF_STATE] + + # spare_bs = {domain: {slot for slot in ontology[domain]} for domain in ontology} + + for state in bs: + domain = state[DOMAIN] + if domain not in domain_bs: + domain_bs[domain] = dict() + + if INFORMED_SLOT_VALUE_TABLE not in state: + continue + + for svp in state[INFORMED_SLOT_VALUE_TABLE]: + slot = svp[SLOT] + values = svp[VALUES] + relation = svp[RELATION] + + if slot not in domain_bs[domain]: + domain_bs[domain][slot] = {"relation": relation, "values": []} + domain_bs[domain][slot]["values"] += list(map(lambda x: x[VALUE], values)) + + # spare_bs[domain].remove(slot) + + domain_bs_list = [] + for domain in domain_bs: + svp_list = [] + for slot in domain_bs[domain]: + val_str = value_sep.join(domain_bs[domain][slot]["values"]) + svp_list.append(f"{slot} {domain_bs[domain][slot]['relation']} {val_str}") + + # control whether to add spare slots + # for slot in sorted(spare_bs[domain]): + # svp_list.append(f"{slot} = None") + if not svp_list: + continue + if do_train: + # shuffle for training + random.shuffle(svp_list) + + # append a slot separator at the end to alleviate the problem of end point prediction of T5 + svt_str = slot_sep.join(svp_list) + slot_sep + + domain_bs_list.append(f"{domain}{domain_prompt_op}{svt_str.strip()}") + + if not domain_bs_list: + return "None" + + 
return domain_sep.join(domain_bs_list) + + +def extract_belief_state_wrapper(value_sep, domain_sep, slot_sep, domain_prompt_op): + def extract_belief_state_func(turn, ontology, do_train=True, **kwargs): + return extract_belief_state( + turn, + value_sep, + domain_sep, + slot_sep, + domain_prompt_op, + ontology, + do_train=do_train, + ) + + return extract_belief_state_func + + +def normalize(query: str) -> str: + def comma_fix(s): + # Remove spaces in front of commas + return s.replace(" , ", ", ") + + def white_space_fix(s): + # Remove double and triple spaces + return " ".join(s.split()) + + def lower(s): + # Convert everything except text between (single or double) quotation marks to lower case + return re.sub( + r"\b(?|<|!)\s=)", + lambda match: match.group(0).replace(" ", ""), + sql, + ) + + return double_chars_op_fix(brackets_fix(agg_fix(sql))) + + return space_fix(comma_fix(white_space_fix(lower(query)))) + + +def extract_sql(turn, split): + if SQL not in turn: + return None + _normalize = normalize if split == "train" else (lambda x: x) + return _normalize(turn[SQL]) + + +def extract_slots_without_intents(turn, value_sep, slot_sep): + if SLOTS_TO_FILL not in turn or not turn[SLOTS_TO_FILL][SLOT_VALUE_TABLE]: + return "None" + slots = [] + for svp in turn[SLOTS_TO_FILL][SLOT_VALUE_TABLE]: + slots.append( + svp[SLOT] + + " " + + svp[RELATION] + + " " + + value_sep.join(map(lambda x: x[VALUE], svp[VALUES])) + ) + + return (slot_sep.join(slots) + slot_sep).strip() + + +def extract_slots_without_intents_wrapper(value_sep, slot_sep): + def extract_slots_without_intents_func(turn, **kwargs): + return extract_slots_without_intents(turn, value_sep, slot_sep) + + return extract_slots_without_intents_func + + +def extract_role_relation_without_turn(dialog, relation_sep): + return relation_sep.join(map(lambda x: x[RELATION], dialog[ROLE_RELATIONS])) + + +def extract_role_relation_without_turn_wrapper(relation_sep): + def extract_role_relation_without_turn_func(dialog, **kwargs): + return extract_role_relation_without_turn(dialog, relation_sep) + + return extract_role_relation_without_turn_func + + +def extrac_rewritten(turn, **kwargs): + if REWRITTEN not in turn: + return None + return turn[REWRITTEN] + + +def extract_options(turn, knowledge, split=None): + if ROLES_TO_SELECT not in turn: + return None + if split == TRAIN_SPLIT: + return knowledge[turn[ROLES_TO_SELECT][0]] + else: + return json.dumps( + {OPTION_LABEL: turn[ROLES_TO_SELECT][0], CANDIDATES: knowledge} + ) + + +# def extract_roles_wrapper(role_sep): +# def extract_roles_func(turn, knowledge, split=None): +# return extract_options(turn, know) + +# return extract_roles_func + + +def extract_intents(turn, intent_sep): + if not turn[ACTIVE_INTENTS]: + return "None" + return intent_sep.join( + map(lambda intent: intent.replace("_", " "), turn[ACTIVE_INTENTS]) + ) + + +def extract_intents_wrapper(intent_sep): + def extract_intents_func(turn, **kwargs): + return extract_intents(turn, intent_sep) + + return extract_intents_func diff --git a/src/modules/preprocess/preprocessor/process_turn_funcs.py b/src/modules/preprocess/preprocessor/process_turn_funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..cc7b0b9e3ff57f22cbece08451a2027c183dd723 --- /dev/null +++ b/src/modules/preprocess/preprocessor/process_turn_funcs.py @@ -0,0 +1,22 @@ +from const import UTTERANCE, CHARACTERS, START, END, MENTION + + +def introduce_mention_to_utterance(turn, insert_index, left_bracket, right_bracket): + turn[UTTERANCE] = ( + 
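+ # splice the MENTION marker, wrapped in the given brackets, into the utterance at insert_index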
turn[UTTERANCE][:insert_index] + + left_bracket + + MENTION + + right_bracket + + turn[UTTERANCE][insert_index:] + ) + + return turn + + +def introduce_mention_to_utterance_wrapper(left_bracket, right_bracket): + def introduce_mention_to_utterance_func(turn, insert_index, **kwargs): + return introduce_mention_to_utterance( + turn, insert_index, left_bracket, right_bracket + ) + + return introduce_mention_to_utterance_func diff --git a/src/modules/preprocess/preprocessor/prompt_funcs.py b/src/modules/preprocess/preprocessor/prompt_funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..853e599de61b04a891ec211863586cbbe637ed99 --- /dev/null +++ b/src/modules/preprocess/preprocessor/prompt_funcs.py @@ -0,0 +1,5 @@ +def const_prompt_func_wrapper(const_prompt): + def const_prompt_func(): + return const_prompt + + return const_prompt_func diff --git a/src/preprocess.sh b/src/preprocess.sh new file mode 100644 index 0000000000000000000000000000000000000000..08c98fc84460086ad02e04d74ad19dad30eb7a9f --- /dev/null +++ b/src/preprocess.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +cd preprocess + +INPUT=$1 +OUTPUT=$2 + +DATA="MAMS-ACSA" +echo "--> ${DATA}" +# python ${DATA}.py --input_dir "${INPUT}/TaskMaster/TM-1-2019" --output_dir "${OUTPUT}/${DATA}" +python ${DATA}.py --input_dir "${INPUT}/MAMS/data/${DATA}/raw" --output_dir "${OUTPUT}/MAMS/${DATA}" \ No newline at end of file diff --git a/src/preprocess/ASTE.py b/src/preprocess/ASTE.py new file mode 100644 index 0000000000000000000000000000000000000000..dcb62d89aafa76627524948d52702d735a47d2ce --- /dev/null +++ b/src/preprocess/ASTE.py @@ -0,0 +1,97 @@ +from utils import write_jsonl_file, parse, read_line_labels +import os + +sent_map = { + "POS": "positive", + "NEU": "neutral", + "NEG": "negative", +} + + +def get_char_index(lengths, tok_ids): + start = lengths[tok_ids[0]] + tok_ids[0] + end = ( + lengths[tok_ids[-1] + 1] + - lengths[tok_ids[0]] + + start + + tok_ids[-1] + - tok_ids[0] + ) + return start, end + + +def parse_aspects(utterance, aspects): + toks = utterance.split() + lengths = list(map(lambda x: len(x), toks)) + lengths = [0] + lengths + + for i in range(1, len(lengths)): + lengths[i] += lengths[i - 1] + + parsed_aspects = [] + for target, opinion, sentiment in aspects: + target_start, target_end = get_char_index(lengths, target) + opinion_start, opinion_end = get_char_index(lengths, opinion) + target_value = " ".join(toks[target[0] : target[-1] + 1]) + opinion_value = " ".join(toks[opinion[0] : opinion[-1] + 1]) + + assert target_value == utterance[target_start:target_end] + assert opinion_value == utterance[opinion_start:opinion_end] + + parsed_aspects.append( + { + "target": { + "value": target_value, + "start": target_start, + "end": target_end, + }, + "opinion": { + "value": opinion_value, + "start": opinion_start, + "end": opinion_end, + }, + "sentiment": sent_map[sentiment], + } + ) + + return parsed_aspects + + +def reformat(args, file): + for domain in os.listdir(args.input_dir): + path = os.path.join(os.path.join(args.input_dir, domain), f"{file}.txt") + data = read_line_labels(path) + + dials = [] + for line in data: + utterance, aspects = line.strip().split("####") + aspects = eval(aspects) + + dial = { + "turn": "single", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": utterance, + "aspects": parse_aspects(utterance, aspects), + } + ], + } + + dials.append(dial) + + write_jsonl_file( + dials, os.path.join(os.path.join(args.output_dir, domain), f"{file}.jsonl") + ) + + +def 
preprocess(args): + reformat(args, "train") + reformat(args, "dev") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/AlphaNLI.py b/src/preprocess/AlphaNLI.py new file mode 100644 index 0000000000000000000000000000000000000000..081edf2677d34d31e468e8e122bcbf54c6c01424 --- /dev/null +++ b/src/preprocess/AlphaNLI.py @@ -0,0 +1,84 @@ +from utils import read_jsonl_file, write_jsonl_file, parse, read_line_labels +import os +import copy + +label2nl = {"1": "First", "2": "Second"} + + +def preprocess_for_train_and_dev(args, file): + data_path = os.path.join(args.input_dir, f"{file}.jsonl") + data = read_jsonl_file(data_path) + + label_path = os.path.join(args.input_dir, f"{file}-labels.lst") + labels = read_line_labels(label_path) + + turns = [] + for idx, example in enumerate(data): + turn = { + "turn": "multi", + "locale": "en", + "dialog": [ + {"roles": ["First observation"], "utterance": example["obs1"]}, + { + "roles": ["Second observation"], + "utterance": example["obs2"], + "roles_to_select": [f"hypothesis candidate {labels[idx]}"], + }, + ], + } + + # turn["dialog"].append( + # { + # "roles": ["First hypothesis"], + # "utterance": example["hyp1"], + # } + # ) + + # turn["dialog"].append( + # { + # "roles": ["Second hypothesis"], + # "utterance": example["hyp2"], + # "roles_to_select": [label2nl[labels[idx]] + " hypothesis"], + # } + # ) + + turn["knowledge"] = { + "type": "text", + "value": { + "hypothesis candidate 1": example["hyp1"], + "hypothesis candidate 2": example["hyp2"], + }, + } + + # turn["roles_to_select"] = ["HYPOTHESIS " + labels[idx]] + turns.append(turn) + + # if labels[idx] == "1": + # pos_hyp = example["hyp1"] + # neg_hyp = example["hyp2"] + # else: + # pos_hyp = example["hyp2"] + # neg_hyp = example["hyp1"] + + # # possitive hypothesis + # pos_turn = copy.deepcopy(turn) + # pos_turn["dialog"].append({"roles": ["HYPOTHESIS"], "utterance": pos_hyp, "class_label": True}) + + # # negative hypothesis + # neg_turn = copy.deepcopy(turn) + # neg_turn["dialog"].append({"roles": ["HYPOTHESIS"], "utterance": neg_hyp, "class_label": False}) + + # turns.append(pos_turn) + # turns.append(neg_turn) + + write_jsonl_file(turns, os.path.join(args.output_dir, f"{file}.jsonl")) + + +def preprocess(args): + preprocess_for_train_and_dev(args, "train") + preprocess_for_train_and_dev(args, "dev") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/Banking77.py b/src/preprocess/Banking77.py new file mode 100644 index 0000000000000000000000000000000000000000..879d4fe0d7a0c84a1182e4c93d347dc49b78679a --- /dev/null +++ b/src/preprocess/Banking77.py @@ -0,0 +1,32 @@ +from utils import write_jsonl_file, read_csv_file, parse + + +def reformat(args, file): + path = args.input_dir + "/" + file + ".csv" + data = read_csv_file(path) + turns = [] + for i in range(len(data)): + t = { + "turn": "single", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": data["text"][i], + "active_intents": [data["category"][i]], + } + ], + } + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess(args): + reformat(args, "train") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/BiToD.py b/src/preprocess/BiToD.py new file mode 100644 index 0000000000000000000000000000000000000000..068d4c9a7fd13b29c7a3915ad0ac9bbe27667949 --- /dev/null +++ b/src/preprocess/BiToD.py @@ 
-0,0 +1,145 @@ +import os +from utils import read_json_file, parse, write_jsonl_file + + +def parse_domains(tasks): + domains = set() + + for task in tasks: + domain: str = task["Task"].split("_")[0] + if domain.endswith("s"): + domain = domain[:-1] + domains.add(domain) + + return list(domains) + + +def parse_belief_states(state): + belief_state = [] + for intent in state: + intent_state = {"intent": intent, "slot_value_table": []} + + for slot in state[intent]: + intent_state["slot_value_table"].append( + { + "slot": slot, + "relation": state[intent][slot]["relation"], + "value": state[intent][slot]["value"], + } + ) + + belief_state.append(intent_state) + + return belief_state + + +def preprocess(args): + filenames = [filename for filename in os.listdir(args.input_dir)] + + data = {"train": [], "dev": [], "test": [], "fewshot": []} + + fewshot_dials = [] + for filename in filenames: + if "fewshot" in filename: + fewshot_data = read_json_file(os.path.join(args.input_dir, filename)) + fewshot_dials += fewshot_data["fewshot_dials"] + + fewshot_dials = set(fewshot_dials) + + for filename in filenames: + # ignore this file + if filename == "dict_en_zh.json" or "fewshot" in filename: + continue + path = os.path.join(args.input_dir, filename) + origin_data = read_json_file(path) + locale = filename[:2] + partition = filename.split("_")[1] + + if partition.endswith(".json"): + partition = partition[:-5] + + if partition == "valid": + partition = "dev" + + for dial_id, dialog in origin_data.items(): + parsed_dialog = { + "turn": "multi", + "domain": parse_domains(dialog["Scenario"]["WizardCapabilities"]), + "locale": locale, + "dialog": [], + } + + last_query = None + querying_result = None + for event in dialog["Events"]: + turn = dict() + turn["role"] = event["Agent"] + + if turn["role"] == "User": + turn["active_intent"] = event["active_intent"] + turn["belief_state"] = parse_belief_states(event["state"]) + + else: + if turn["role"] == "Wizard" and "Text" not in event: + assert last_query is None + last_query = { + "Constraints": event["Constraints"], + "API": event["API"], + } + continue + elif turn["role"] == "KnowledgeBase": + assert querying_result is None + querying_result = { + "Item": event["Item"], + "TotalItems": event["TotalItems"], + "Topic": event["Topic"], + } + + continue + else: + if last_query is not None: + turn["query"] = last_query + last_query = None + + turn["querying_result"] = querying_result + querying_result = None + if event["PrimaryItem"]: + turn["main_items"] = [event["PrimaryItem"]] + + if event["SecondaryItem"]: + turn["main_items"].append(event["SecondaryItem"]) + + turn["dialog_act"] = [] + Actions = event["Actions"] + das = dict() + + for action in Actions: + act = action.pop("act") + if act not in das: + das[act] = [] + das[act].append(action) + + for act in das: + turn["dialog_act"].append( + {"act": act, "slot_value_table": das[act]} + ) + + turn["utterance"] = event["Text"] + + parsed_dialog["dialog"].append(turn) + + data[partition].append(parsed_dialog) + + if dial_id in fewshot_dials: + data["fewshot"].append(parsed_dialog) + + for partition in data: + if data[partition]: + write_jsonl_file( + data[partition], os.path.join(args.output_dir, f"{partition}.jsonl") + ) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/CANARD.py b/src/preprocess/CANARD.py new file mode 100644 index 0000000000000000000000000000000000000000..686f82588e11fc394ab06428e6041c79e4903a19 --- /dev/null +++ b/src/preprocess/CANARD.py @@ 
-0,0 +1,43 @@ +from utils import read_json_file, parse, write_jsonl_file +import os + + +def reformat(args, file): + path = os.path.join(args.input_dir, f"{file}.json") + data = read_json_file(path) + + turns = [] + for turn in data: + t = {"turn": "multi", "locale": "en", "dialog": []} + for i, utt in enumerate(turn["History"][2:]): + d = {"roles": [("USER", "SYSTEM")[i % 2]], "utterance": utt} + t["dialog"].append(d) + d = { + "roles": ["USER"], + "utterance": turn["Question"], + "rewritten": turn["Rewrite"], + } + t["dialog"].append(d) + + t["knowledge"] = { + "type": "dict", + "value": { + "article title": turn["History"][0], + "section title": turn["History"][1], + }, + } + + turns.append(t) + + write_jsonl_file(turns, os.path.join(args.output_dir, f"{file}.jsonl")) + + +def preprocess(args): + reformat(args, "train") + reformat(args, "dev") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/CLINC150.py b/src/preprocess/CLINC150.py new file mode 100644 index 0000000000000000000000000000000000000000..5d99c5816832de0301dd9124fd8328dea7f499d3 --- /dev/null +++ b/src/preprocess/CLINC150.py @@ -0,0 +1,79 @@ +from utils import read_json_file, parse, write_jsonl_file +import os + + +def load_intent2domain(args): + data = read_json_file(os.path.join(args.input_dir, "domains.json")) + + intent2domain = {"oos": "oos"} + + for domain in data: + for intent in data[domain]: + assert intent not in intent2domain + intent2domain[intent] = domain + + return intent2domain + + +def load_sent2domain(filename, intent2domain): + data = read_json_file(filename) + + sent2domain = dict() + for split in data: + for example in data[split]: + sent2domain[example[0]] = intent2domain[example[1]] + + return sent2domain + + +def reformat(data_file, data_dir, sent2domain): + origin_data = read_json_file(data_file) + + data = dict() + for split in origin_data: + if "oos" in split: + continue + data[split] = [] + + for example in origin_data[split]: + dial = { + "turn": "single", + "locale": "en", + "domain": [sent2domain[example[0]]] + if example[0] in sent2domain + else ["oos"], + "dialog": [ + { + "roles": ["USER"], + "utterance": example[0], + "active_intents": [example[1]], + } + ], + } + data[split].append(dial) + + for split in data: + if "val" in split: + part = split.replace("val", "dev") + else: + part = split + outfile = os.path.join(data_dir, f"{part}.jsonl") + write_jsonl_file(data[split], outfile) + + +def preprocess(args): + intent2domain = load_intent2domain(args) + sent2domain = load_sent2domain( + os.path.join(args.input_dir, "data_full.json"), intent2domain + ) + + data_file = os.path.join(args.input_dir, "data_full.json") + # data_dir = os.path.join(args.output_dir, filename[:-5]) + data_dir = args.output_dir + + reformat(data_file, data_dir, sent2domain) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/CMUDoG.py b/src/preprocess/CMUDoG.py new file mode 100644 index 0000000000000000000000000000000000000000..f0747e2babae68d7326a59152d105a167491fd7c --- /dev/null +++ b/src/preprocess/CMUDoG.py @@ -0,0 +1,106 @@ +from utils import parse, write_jsonl_file, read_json_file +import os + + +def build_wiki_index(wiki_dir): + idx2path = dict() + + for wiki_filename in os.listdir(wiki_dir): + wiki_path = os.path.join(wiki_dir, wiki_filename) + wiki_data = read_json_file(wiki_path) + + wiki_idx = wiki_data["wikiDocumentIdx"] + idx2path[wiki_idx] = wiki_path + + return idx2path + + +def 
merge_consecutive_turns(history): + new_history = [] + cur_turn = None + for turn in history: + if cur_turn is None: + cur_turn = turn + cur_turn["docIdx"] = [cur_turn["docIdx"]] + else: + if cur_turn["uid"] == turn["uid"]: + cur_turn["text"] += " " + turn["text"].strip() + if turn["docIdx"] not in cur_turn["docIdx"]: + cur_turn["docIdx"].append(turn["docIdx"]) + else: + new_history.append(cur_turn) + cur_turn = turn + cur_turn["docIdx"] = [cur_turn["docIdx"]] + if cur_turn is not None: + new_history.append(cur_turn) + + return new_history + + +def load_train_files(args): + conv_dir = os.path.join(args.input_dir, "Conversations") + input_dir = os.path.join(conv_dir, "train") + filenames = [filename for filename in os.listdir(input_dir)] + + return filenames + + +def preprocess(args, split, train_files=None): + wiki_dir = os.path.join(args.input_dir, "WikiData") + conv_dir = os.path.join(args.input_dir, "Conversations") + + idx2path = build_wiki_index(wiki_dir) + input_dir = os.path.join(conv_dir, split) + + if split == "valid": + split = "dev" + output_file = os.path.join(args.output_dir, f"{split}.jsonl") + + processed_data = [] + + def flip_role(role): + if role == "user1": + return "user2" + elif role == "user2": + return "user1" + else: + raise ValueError(f"Unknown role: {role}") + + for filename in os.listdir(input_dir): + if train_files is not None and filename in train_files: + continue + filepath = os.path.join(input_dir, filename) + data = read_json_file(filepath) + history = data["history"] + + history = merge_consecutive_turns(history) + + dialog = {"turn": "multi", "locale": "en", "dialog": []} + + wikipath = idx2path[data["wikiDocumentIdx"]] + wikidata = read_json_file(wikipath) + + flip = False + for idx, turn in enumerate(history): + if idx == 0 and turn["uid"] == "user2": + flip = True + dialog["dialog"].append( + { + "roles": [turn["uid"]] if not flip else [flip_role(turn["uid"])], + "utterance": turn["text"], + "knowledge_to_select": list(map(str, turn["docIdx"])), + } + ) + dialog["knowledge"] = {"type": "dict", "value": wikidata} + + processed_data.append(dialog) + + write_jsonl_file(processed_data, output_file) + + +if __name__ == "__main__": + args = parse() + train_files = load_train_files(args) + preprocess(args, "train") + preprocess(args, "valid", train_files) + preprocess(args, "test", train_files) diff --git a/src/preprocess/COD.py b/src/preprocess/COD.py new file mode 100644 index 0000000000000000000000000000000000000000..885fc9d98abe047242ebe231e07d3fd22fd958e0 --- /dev/null +++ b/src/preprocess/COD.py @@ -0,0 +1,226 @@ +import os +from utils import read_json_file, write_jsonl_file, parse + + +def get_actions_goal(actions): + goal = [] + for item in actions: + g = { + "intent": item["act"], + "slot_value_table": [ + { + "slot": item["slot"], + "value": item["values"][0] if len(item["values"]) else None, + } + ], + } + goal.append(g) + return goal + + +def get_state_goal(state): + goal = [{"intent": state["active_intent"], "slot_value_table": []}] + for slot, value in state["slot_values"].items(): + svt = {"slot": slot, "value": value[0]} + goal[0]["slot_value_table"].append(svt) + return goal + + +def get_call_goal(call): + goal = [{"intent": call["method"], "slot_value_table": []}] + for slot, value in call["parameters"].items(): + svt = {"slot": slot, "value": value} + goal[0]["slot_value_table"].append(svt) + return goal + + +def get_goal(domain_name, content): + goal_func = { + "actions": get_actions_goal, + "state": get_state_goal, + "service_call": 
get_call_goal, + } + return goal_func[domain_name](content) + + +def get_belief_state(frame): + bs = [] + for domain in ["actions", "state", "service_call"]: + if domain in frame: + blf_stt = {"domain": domain, "goal": get_goal(domain, frame[domain])} + + bs.append(blf_stt) + + return bs + + +def get_querying_result(frame): + if "service_results" not in frame: + return [] + else: + return frame["service_results"] + + +def parse_slots_index(slots): + slot2index = dict() + for slot in slots: + # if slot["slot"] in slot2index and ( + # slot2index[slot["slot"]]["start"] != slot["start"] + # or slot2index[slot["slot"]]["end"] != slot["exclusive_end"] + # ): + # raise ValueError("test") + slot2index[slot["slot"]] = { + "start": slot["start"], + "end": slot["exclusive_end"], + } + return slot2index + + +def parse_belief_state(frames): + intent_domain_bs = dict() + + for frame in frames: + slot2index = parse_slots_index(frame["slots"]) + domain = frame["service"] + state = frame["state"] + intent = state["active_intent"] + + if intent not in intent_domain_bs: + intent_domain_bs[intent] = dict() + + if domain not in intent_domain_bs[intent]: + intent_domain_bs[intent][domain] = { + "svt": [], + } + + for slot in state["slot_values"]: + svp = { + "slot": slot, + "value": state["slot_values"][slot], + "relation": "equal_to", + } + + if slot in slot2index: + svp["start"] = slot2index[slot]["start"] + svp["end"] = slot2index[slot]["end"] + + intent_domain_bs[intent][domain]["svt"].append(svp) + intent_domain_bs[intent][domain]["requested_slots"] = state["requested_slots"] + + bs = [] + for intent in intent_domain_bs: + for domain in intent_domain_bs[intent]: + bs.append( + { + "intent": intent, + "domain": domain, + "requested_slots": intent_domain_bs[intent][domain][ + "requested_slots" + ], + "slot_value_table": intent_domain_bs[intent][domain]["svt"], + } + ) + + return bs + + +def parse_dialog_act(frames): + domain_act_svt = dict() + + for frame in frames: + slot2index = parse_slots_index(frame["slots"]) + domain = frame["service"] + actions = frame["actions"] + + if domain not in domain_act_svt: + domain_act_svt[domain] = dict() + + for action in actions: + act = action["act"] + if act not in domain_act_svt[domain]: + domain_act_svt[domain][act] = [] + + svp = { + "slot": action["slot"], + "value": action["values"], + "relation": "equal_to", + } + + if "canonical_values" in action: + svp["canonical_values"] = (action["canonical_values"],) + + slot = action["slot"] + if slot in slot2index: + svp["start"] = slot2index[slot]["start"] + svp["end"] = slot2index[slot]["end"] + + domain_act_svt[domain][act].append(svp) + + da = [] + for domain in domain_act_svt: + for act in domain_act_svt[domain]: + da.append( + { + "act": act, + "domain": domain, + "slot_value_table": domain_act_svt[domain][act], + } + ) + + return da + + +def preprocess(args): + filenames = os.listdir(args.input_dir) + data = {"train": [], "dev": [], "test": []} + + for filename in filenames: + if not filename.endswith(".json"): + continue + path = os.path.join(args.input_dir, filename) + origin_data = read_json_file(path) + + locale = filename.split("_")[0] + partition = filename.split("_")[1][:-5] + + for dial in origin_data: + parsed_dial = { + "turn": "multi", + "domain": dial["services"], + "locale": locale, + "dialog": [], + } + + for origin_turn in dial["turns"]: + frames = origin_turn["frames"] + + # print(frames) + + turn = { + "role": origin_turn["speaker"], + "utterance": origin_turn["utterance"], + "dialog_act": 
parse_dialog_act(frames), + } + + if turn["role"] == "USER": + turn["belief_state"] = parse_belief_state(frames) + + if "service_call" in origin_turn: + turn["query"] = origin_turn["service_call"] + if "service_results" in origin_turn: + turn["querying_result"] = origin_turn["service_results"] + + parsed_dial["dialog"].append(turn) + + data[partition].append(parsed_dial) + + for partition in data: + if data[partition]: + write_jsonl_file( + data[partition], os.path.join(args.output_dir, f"{partition}.jsonl") + ) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/CamRest676.py b/src/preprocess/CamRest676.py new file mode 100644 index 0000000000000000000000000000000000000000..01c343bc0ec847f34ef10a57383059bdee179b2d --- /dev/null +++ b/src/preprocess/CamRest676.py @@ -0,0 +1,95 @@ +from utils import read_json_file, parse, write_jsonl_file +import os + + +def readfile(input_dir): + path = os.path.join(input_dir, "CamRest676_annotated.json") + data = read_json_file(path) + return data + + +def build_usr_turn(usr): + turn = {"roles": ["USER"], "utterance": usr["transcript"], "belief_state": []} + + act2svp = dict() + for da in usr["slu"]: + act = da["act"] + if act not in act2svp: + act2svp[act] = [] + + for slot, value in da["slots"]: + act2svp[act].append((slot, value)) + + for act in act2svp: + if act != "request": + svt = [] + for slot, value in act2svp[act]: + svt.append( + {"slot": slot, "values": [{"value": value}], "relation": "="} + ) + state = { + "intent": act, + "informed_slot_value_table": svt, + "domain": "restaurant", + } + else: + state = { + "intent": act, + "requested_slots": list(map(lambda x: x[1], act2svp[act])), + "domain": "restaurant", + } + turn["belief_state"].append(state) + + turn["rewritten"] = usr["transcript_complete"] + return turn + + +def build_sys_turn(sys): + turn = {"roles": ["SYSTEM"], "utterance": sys["sent"], "dialog_acts": []} + + svt = [] + for act in sys["DA"]: + svt.append({"slot": act}) + turn["dialog_acts"].append({"act": "REQUEST", "slot_value_table": svt}) + return turn + + +def preprocess(args): + origin_data = readfile(args.input_dir) + + data = [] + for dial in origin_data: + dial = dial["dial"] + + parsed_dial = { + "turn": "multi", + "domain": ["restaurant"], + "locale": "en", + "dialog": [], + } + for origin_turn in dial: + usr = origin_turn["usr"] + sys = origin_turn["sys"] + + parsed_dial["dialog"].append(build_usr_turn(usr)) + parsed_dial["dialog"].append(build_sys_turn(sys)) + + data.append(parsed_dial) + + # split into train/dev/test with a 3:1:1 ratio + total = len(data) + train_num = total * 3 // 5 + dev_num = total * 1 // 5 + + train_data = data[:train_num] + dev_data = data[train_num : train_num + dev_num] + test_data = data[train_num + dev_num :] + + write_jsonl_file(train_data, os.path.join(args.output_dir, "train.jsonl")) + write_jsonl_file(dev_data, os.path.join(args.output_dir, "dev.jsonl")) + write_jsonl_file(test_data, os.path.join(args.output_dir, "test.jsonl")) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/CoQA.py b/src/preprocess/CoQA.py new file mode 100644 index 0000000000000000000000000000000000000000..1f24b194c91fb0dbbf5e93792ba7a62671998795 --- /dev/null +++ b/src/preprocess/CoQA.py @@ -0,0 +1,93 @@ +from utils import read_json_file, write_jsonl_file, parse +import os + + +def preprocess(args, split): + path = os.path.join(args.input_dir, f"coqa-{split}-v1.0.json") + data = read_json_file(path) + + outfile = os.path.join(args.output_dir, 
f"{split}.jsonl") + + data = data["data"] + + turns = [] + for i in range(len(data)): + t = { + "turn": "multi", + "locale": "en", + "dialog": [], + "knowledge": { + "type": "dict", + "value": {"source": data[i]["source"], "passage": data[i]["story"]}, + }, + } + + cand_answers = [list(map(lambda x: x["input_text"], data[i]["answers"]))] + + assert split != "train" or "additional_answers" not in data[i] + if "additional_answers" in data[i]: + for answers in data[i]["additional_answers"].values(): + cand_answers.append(list(map(lambda x: x["input_text"], answers))) + + cand_answers = list(zip(*cand_answers)) + + for q, answers in zip(data[i]["questions"], cand_answers): + dq = {"roles": ["USER"], "utterance": q["input_text"]} + + t["dialog"].append(dq) + for answer in answers: + da = {"roles": ["SYSTEM"], "utterance": answer} + t["dialog"].append(da) + + turns.append(t) + + write_jsonl_file(turns, outfile) + + +# def preprocess_gold(args, file): +# path = os.path.join(args.input_dir, f"{file}.json") +# data = read_json_file(path) +# data = data["data"] + +# turns = [] +# for i in range(len(data)): +# t = { +# "turn": "multi", +# "locale": "en", +# "title": { +# "name": data[i]["name"] +# }, +# "dialog": [], +# "knowledge": { +# "type": "text", +# "value": data[i]["story"] +# } +# } +# for q, a0, a1, a2, a3 in zip( +# data[i]["questions"], +# data[i]["answers"], +# data[i]["additional_answers"]["0"], +# data[i]["additional_answers"]["1"], +# data[i]["additional_answers"]["2"] +# ): +# t = deepcopy(t) +# dq = { +# "role": "question", +# "utterance": q["input_text"] +# } +# da = { +# "role": "answer", +# "utterance": "\n".join([a0["input_text"], a1["input_text"], a2["input_text"], a3["input_text"]]) +# } +# t["dialog"].append(dq) +# t["dialog"].append(da) + +# turns.append(t) + +# write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") diff --git a/src/preprocess/CoSQL.py b/src/preprocess/CoSQL.py new file mode 100644 index 0000000000000000000000000000000000000000..db71c1d2cb950e95b77ba3a0cbb7cdb33a9ec138 --- /dev/null +++ b/src/preprocess/CoSQL.py @@ -0,0 +1,92 @@ +from Spider import dump_db_json_schema +import json +import os +import copy +from utils import write_jsonl_file, parse +import shutil + + +def preprocess(args, split): + data_filepaths = [ + os.path.join( + os.path.join(args.input_dir, "sql_state_tracking"), f"cosql_{split}.json" + ) + ] + db_path = os.path.join(args.input_dir, "database") + out_db_path = os.path.join(args.output_dir, "database") + + if not os.path.exists(out_db_path): + shutil.copytree(db_path, out_db_path) + + schema_cache = dict() + processed_data = [] + for data_filepath in data_filepaths: + with open(data_filepath, encoding="utf-8") as f: + cosql = json.load(f) + + for sample in cosql: + dialog = { + "locale": "en", + "dialog": [], + } + db_id = sample["database_id"] + if db_id not in schema_cache: + schema_cache[db_id] = dump_db_json_schema( + db_path + "/" + db_id + "/" + db_id + ".sqlite", db_id + ) + schema = schema_cache[db_id] + + dialog["knowledge"] = { + "type": "dict", + "value": {"db_id": db_id, "schema": schema, "db_path": out_db_path}, + } + + final_dialog = copy.deepcopy(dialog) + final_dialog["turn"] = "single" + final_dialog["dialog"].append( + { + "roles": ["USER"], + "utterance": sample["final"]["utterance"] + .replace("``", '"') + .replace("''", '"') + .strip(), + "sql": sample["final"]["query"], + } + ) + + if split == "train": + 
processed_data.append(final_dialog) + dialog["turn"] = "multi" + roles = ["USER", "SYSTEM"] + + for _, turn in enumerate(sample["interaction"]): + for idx, utterance in enumerate( + turn["utterance"] + .replace("``", '"') + .replace("''", '"') + .strip() + .split("|") + ): + utterance = utterance.strip() + + dialog["dialog"].append( + {"roles": [roles[idx % 2]], "utterance": utterance} + ) + + assert dialog["dialog"][-1]["roles"] == ["USER"] + dialog["dialog"][-1]["sql"] = turn["query"] + + processed_data.append(dialog) + + write_jsonl_file(processed_data, os.path.join(args.output_dir, f"{split}.jsonl")) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + + shutil.copyfile( + os.path.join(args.input_dir, "tables.json"), + os.path.join(args.output_dir, "tables.json"), + ) diff --git a/src/preprocess/CommonsenseDialog.py b/src/preprocess/CommonsenseDialog.py new file mode 100644 index 0000000000000000000000000000000000000000..75bebf16dbd431dd9a484988a5d815d94250ec1c --- /dev/null +++ b/src/preprocess/CommonsenseDialog.py @@ -0,0 +1,39 @@ +import re +from utils import ( + read_json_file, + read_jsonl_file, + write_json_file, + write_jsonl_file, + parse, +) + + +def reformat(args, file): + path = args.input_dir + "/" + file + ".json" + data = read_json_file(path) + turns = [] + for turn_ in data: + turn = data[turn_] + speaker = turn["speaker"] + t = {"turn": "multi", "locale": "en", "dialog": []} + for j, tt in enumerate(turn["turns"]): + d = {"roles": [str([speaker, "third-person"][j % 2])], "utterance": tt} + t["dialog"].append(d) + + t["knowledge"] = {"type": "str", "value": {"context": turn["context"]}} + turns.append(t) + + if file == "valid": + file = "dev" + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess(args): + reformat(args, "train") + reformat(args, "valid") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/CommonsenseQA.py b/src/preprocess/CommonsenseQA.py new file mode 100644 index 0000000000000000000000000000000000000000..15605b9616fa090416c28dfd48f497b061b74087 --- /dev/null +++ b/src/preprocess/CommonsenseQA.py @@ -0,0 +1,52 @@ +import os +from utils import read_jsonl_file, write_jsonl_file, parse, choices + + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"{split}_rand_split.jsonl") + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + data = read_jsonl_file(infile) + + processed_data = [] + for example in data: + dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": { + "type": "dict", + "value": {"question concept": example["question"]["question_concept"]}, + }, + } + + dial["dialog"].append( + {"roles": ["USER"], "utterance": example["question"]["stem"], "roles_to_select": [example["answerKey"]]} + ) + + for choice in example["question"]["choices"]: + dial["knowledge"]["value"][choice["label"]] = choice["text"] + + # answer = example["answerKey"] + # label = -1 + # for idx, choice in enumerate(example["question"]["choices"]): + # dial["dialog"].append( + # {"roles": [f"{choices[idx]} choice"], "utterance": choice["text"]} + # ) + + # if choice["label"] == answer: + # assert label == -1 + # label = idx + + # assert label >= 0 + # dial["dialog"][-1]["roles_to_select"] = [f"{choices[label]} choice"] + + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + 
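+    # Note: only the train and dev splits are converted here; the CommonsenseQA
+    # test split presumably ships without answerKey labels upstream, so it
+    # could not be mapped to roles_to_select in this format.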
preprocess(args, "dev") diff --git a/src/preprocess/CommonsenseQA_2.0.py b/src/preprocess/CommonsenseQA_2.0.py new file mode 100644 index 0000000000000000000000000000000000000000..4c0cff594a40c4a116eed2c7df988e20b231ad72 --- /dev/null +++ b/src/preprocess/CommonsenseQA_2.0.py @@ -0,0 +1,48 @@ +import os +from utils import read_jsonl_file, write_jsonl_file, parse, choices + + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"CSQA2_{split}.json") + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + data = read_jsonl_file(infile) + + processed_data = [] + for example in data: + knowledge = dict() + + if example["relational_prompt_used"]: + knowledge["relational prompt"] = example["relational_prompt"] + if example["topic_prompt_used"]: + knowledge["topic prompt"] = example["topic_prompt"] + + dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": {"type": "dict", "value": knowledge}, + } + + dial["dialog"].append({"roles": ["USER"], "utterance": example["question"]}) + + answer = example["answer"] + label = -1 + for idx, choice in enumerate(["yes", "no"]): + dial["knowledge"]["value"][chr(ord('A') + idx)] = choice + if choice == answer: + assert label == -1 + label = idx + + assert label >= 0 + dial["dialog"][-1]["roles_to_select"] = chr(ord('A') + label) + + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") diff --git a/src/preprocess/CornellMovie.py b/src/preprocess/CornellMovie.py new file mode 100644 index 0000000000000000000000000000000000000000..bc1d1c302dede2be15984cbf9863000a8e51f2a7 --- /dev/null +++ b/src/preprocess/CornellMovie.py @@ -0,0 +1,77 @@ +from utils import parse, write_jsonl_file +import os, json +from tqdm import tqdm + +def replace(text): + text = text.strip() + if text: + return text + + print("Replacing an utterance into `None`") + + return "None" + + +def preprocess(args): + dialogs = [] + + dial = { + "turn": "multi", + "locale": "en", + "dialog": [], + } + + pre_id = None + dial = None + roles = ["USER", "SYSTEM"] + role_idx = 0 + with open(os.path.join(args.input_dir, "utterances.jsonl"), "r") as reader: + for line in tqdm(reader): + turn = json.loads(line) + + if dial is None: + dial = { + "turn": "multi", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": replace(turn["text"]), + } + ], + } + role_idx += 1 + pre_id = turn["conversation_id"] + elif turn["conversation_id"] == pre_id: + dial["dialog"].append( + { + "roles": [roles[role_idx % 2]], + "utterance": replace(turn["text"]), + } + ) + role_idx += 1 + else: + dialogs.append(dial) + dial = { + "turn": "multi", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": replace(turn["text"]), + } + ], + } + role_idx = 1 + pre_id = turn["conversation_id"] + + dialogs.append(dial) + + write_jsonl_file(dialogs[:-360], os.path.join(args.output_dir, "train.jsonl")) + write_jsonl_file(dialogs[-360:-180], os.path.join(args.output_dir, "dev.jsonl")) + write_jsonl_file(dialogs[-180:], os.path.join(args.output_dir, "test.jsonl")) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/CosmosQA.py b/src/preprocess/CosmosQA.py new file mode 100644 index 0000000000000000000000000000000000000000..efa07e5af370b2b150366f6d1330da37dcf91c62 --- /dev/null +++ b/src/preprocess/CosmosQA.py @@ -0,0 +1,51 @@ +import os +from utils import read_jsonl_file, write_jsonl_file, parse, 
choices + + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"{split}.jsonl") + + if split == "valid": + split = "dev" + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + data = read_jsonl_file(infile) + processed_data = [] + + for example in data: + dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": {"type": "dict", "value": {}}, + } + + dial["dialog"].append({"roles": ["USER"], "utterance": example["question"]}) + + example.pop("question") + example.pop("id") + + label = example.pop("label") + + # assert 0 <= label <= 3 + + assert len(list(example.keys())) == 5 + for i in range(4): + answer = example[f"answer{i}"] + # dial["dialog"].append( + # {"roles": [f"{choices[i]} choice"], "utterance": answer} + # ) + dial["knowledge"]["value"][f"answer{i}"] = answer + + dial["dialog"][-1]["roles_to_select"] = [f"answer{label}"] + + dial["knowledge"]["value"]["passage"] = example["context"] + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "valid") diff --git a/src/preprocess/DDRel.py b/src/preprocess/DDRel.py new file mode 100644 index 0000000000000000000000000000000000000000..ca1ec5f69c8eeb288bbf6379bff71a5a7dfe4d5d --- /dev/null +++ b/src/preprocess/DDRel.py @@ -0,0 +1,59 @@ +from utils import ( + read_jsonl_file, + write_jsonl_file, + parse, +) + +label2relation = { + "1": "Child and Parent", + "2": "Child and Other Family Elder", + "3": "Siblings", + "4": "Spouse", + "5": "Lovers", + "6": "Courtship", + "7": "Friends", + "8": "Neighbors", + "9": "Roommates", + "10": "Workplace Superior and Subordinate", + "11": "Colleague or Partners", + "12": "Opponents", + "13": "Professional Contact", +} + + +def reformat(args, file): + path = args.input_dir + "/" + file + ".txt" + data = read_jsonl_file(path) + + turns = [] + for i in range(len(data)): + t = {"turn": "multi", "locale": "en", "dialog": []} + AB2name = {"A": data[i]["nameA"], "B": data[i]["nameB"]} + + for c in data[i]["context"]: + role = c[0] + utterance = c[2:].strip() + # one bad case + if role not in AB2name: + role = "B" + utterance = c + + d = {"roles": [AB2name[role]], "utterance": utterance} + t["dialog"].append(d) + + t["role_relations"] = [{"relation": label2relation[data[i]["label"]]}] + + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess(args): + reformat(args, "train") + reformat(args, "dev") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/DREAM.py b/src/preprocess/DREAM.py new file mode 100644 index 0000000000000000000000000000000000000000..7729e9b9152b026e8f7d8b5ba4ad097c1d2a74ae --- /dev/null +++ b/src/preprocess/DREAM.py @@ -0,0 +1,61 @@ +from utils import read_json_file, write_jsonl_file, parse, choices +import os +import copy + + +def reformat(args, split): + infile = os.path.join(args.input_dir, f"{split}.json") + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + data = read_json_file(infile) + processed_data = [] + for example in data: + knowledge_dial = [] + for turn in example[0]: + role = turn[0] + role = "woman" if role == "W" else "man" + text = turn[2:].strip() + knowledge_dial.append({"roles": [role], "utterance": text}) + _dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": {"type": "list", "value": {}}, + } + + for turn in example[1]: + dialogue = copy.deepcopy(_dial) 
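+            # Each question becomes its own single-turn sample: the answer
+            # options are stored in the knowledge dict (keyed A, B, C, ...)
+            # and the full source conversation is attached below under the
+            # "dialogue" key.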
+ dialogue["dialog"].append( + {"roles": ["USER"], "utterance": turn["question"]} + ) + + label = -1 + for idx, choice in enumerate(turn["choice"]): + # dialogue["dialog"].append( + # {"roles": [f"{choices[idx]} choice"], "utterance": choice} + # ) + + dialogue["knowledge"]["value"][chr(ord('A') + idx)] = choice + + if choice == turn["answer"]: + label = idx + + assert label >= 0 + + dialogue["dialog"][-1]["roles_to_select"] = [chr(ord('A') + label)] + dialogue["knowledge"]["value"]["dialogue"] = knowledge_dial + + processed_data.append(dialogue) + + write_jsonl_file(processed_data, outfile) + + +def preprocess(args): + reformat(args, "train") + reformat(args, "dev") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/DSTC2.py b/src/preprocess/DSTC2.py new file mode 100644 index 0000000000000000000000000000000000000000..f88e5c409a578b0affba7e855c84e674a90a48a2 --- /dev/null +++ b/src/preprocess/DSTC2.py @@ -0,0 +1,147 @@ +from utils import parse, read_json_file, write_jsonl_file, write_json_file +import os + + +def parse_dialogue_act(dialogue_acts, utterance, domain): + parsed_dialogue_acts = [] + for da in dialogue_acts: + svt = [] + + for slot, value in da["slots"]: + value = str(value) + # request + if slot == "slot": + svt.append({"slot": value}) + else: + if value in utterance: + start = utterance.index(value) + end = start + len(value) + else: + start = -1 + end = -1 + svt.append( + { + "slot": slot, + "values": [ + { + "value": value, + "start": start, + "end": end, + } + ], + "relation": "=", + } + ) + + dialogue_act = { + "act": da["act"], + "slot_value_table": svt, + "domain": domain, + } + + parsed_dialogue_acts.append(dialogue_act) + + return parsed_dialogue_acts + + +def preprocess( + input_dir, + output_dir, + split, + domain="restaurant", + intent="FindRestaurants", + write=True, +): + processed_data = [] + + schema = { + domain: set(), + } + + with open(os.path.join(input_dir, f"scripts/config/{split}.flist"), "r") as reader: + for example_dir_name in reader: + example_dir = os.path.join(input_dir, "data", example_dir_name.strip()) + data = read_json_file(os.path.join(example_dir, "log.json")) + label = read_json_file(os.path.join(example_dir, "label.json")) + + dialog = {"turn": "multi", "locale": "en", "dialog": []} + + for turn_idx, turn_data in enumerate(data["turns"]): + # system + utterance = turn_data["output"]["transcript"] + dialog["dialog"].append( + { + "roles": ["SYSTEM"], + "utterance": utterance, + "dialogue_acts": parse_dialogue_act( + turn_data["output"]["dialog-acts"], utterance, domain + ), + } + ) + + # user + label_turn = label["turns"][turn_idx] + # 1-best + # utterance = label_turn["transcription"] + utterance = turn_data["input"]["live"]["asr-hyps"][0]["asr-hyp"] + dialog["dialog"].append( + { + "roles": ["USER"], + "utterance": utterance, + "belief_state": [ + { + "intent": intent, + "informed_slot_value_table": [], + "requested_slots": label_turn["requested-slots"], + "domain": domain, + } + ], + "dialogue_acts": parse_dialogue_act( + label_turn["semantics"]["json"], utterance, domain + ), + } + ) + + for slot, value in label_turn["goal-labels"].items(): + dialog["dialog"][-1]["belief_state"][-1][ + "informed_slot_value_table" + ].append( + { + "slot": slot, + "values": [ + { + "value": value, + } + ], + "relation": "=", + } + ) + + schema[domain].add(slot) + + processed_data.append(dialog) + + ontology = {domain: {slot: True for slot in schema[domain]}} + + if write: + outfile = 
os.path.join(output_dir, f"{split}.jsonl") + write_jsonl_file(processed_data, outfile) + if "train" in split: + split = "train" + elif "dev" in split: + split = "dev" + elif "test" in split: + split = "test" + write_json_file( + ontology, os.path.join(args.output_dir, f"{split}_ontology.json") + ) + + return processed_data, schema + + +if __name__ == "__main__": + args = parse() + + preprocess(os.path.join(args.input_dir, "traindev"), args.output_dir, "dstc2_train") + preprocess(os.path.join(args.input_dir, "traindev"), args.output_dir, "dstc2_dev") + preprocess(os.path.join(args.input_dir, "test"), args.output_dir, "dstc2_test") diff --git a/src/preprocess/DSTC3.py b/src/preprocess/DSTC3.py new file mode 100644 index 0000000000000000000000000000000000000000..1a21c194411950417f0008a64ac7567eed733ce0 --- /dev/null +++ b/src/preprocess/DSTC3.py @@ -0,0 +1,38 @@ +from utils import parse, write_jsonl_file, write_json_file +from DSTC2 import preprocess +import os + + +if __name__ == "__main__": + args = parse() + + # include DSTC2 training data + train_data, ontology = preprocess( + os.path.join(args.input_dir, "traindev"), + args.output_dir, + "dstc2_train", + write=False, + ) + + tourist_train_data, tourist_ontology = preprocess( + os.path.join(args.input_dir, "seed"), + args.output_dir, + "dstc3_seed", + domain="tourist", + intent="FindPlaceToEat", + write=False, + ) + + train_data += tourist_train_data + ontology.update(tourist_ontology) + + write_jsonl_file(train_data, os.path.join(args.output_dir, "dstc3_train.jsonl")) + write_json_file(ontology, os.path.join(args.output_dir, "train_ontology.json")) + + preprocess( + os.path.join(args.input_dir, "test"), + args.output_dir, + "dstc3_test", + domain="tourist", + intent="FindPlaceToEat", + ) diff --git a/src/preprocess/DSTC6.py b/src/preprocess/DSTC6.py new file mode 100644 index 0000000000000000000000000000000000000000..584a25092fe30fa096b4a67fb214a32ec56a7f15 --- /dev/null +++ b/src/preprocess/DSTC6.py @@ -0,0 +1,95 @@ +import os +from utils import read_json_file, write_jsonl_file, parse + +''' +Integrate all files in one directory +''' + +def preprocess(args): + dirs = {"dev": ["dbdc3_revised/en/dev/CIC_115", "dbdc3_revised/en/dev/IRIS_100", "dbdc3_revised/en/dev/TKTK_100", "dbdc3_revised/en/dev/YI_100"], + "eval": ["dbdc3_revised/en/eval/CIC_50", "dbdc3_revised/en/eval/IRIS_50", "dbdc3_revised/en/eval/TKTK_50", "dbdc3_revised/en/eval/YI_50", + "dbdc3_revised/ja/eval/DCM", "dbdc3_revised/ja/eval/DIT", "dbdc3_revised/ja/eval/IRS"]} + + ''' + add train/eval/test instruction + ''' + + for directory in dirs["dev"]: + real_dir = os.path.join(args.input_dir, directory) + filenames = [os.path.join(real_dir, filename) for filename in os.listdir(real_dir)] + + turns = [] + for path in filenames: + data = read_json_file(path) + t = { + "turn": "multi", + "locale": "en", + "dialog": [], + "knowledge": { + "type": "dialogue", + "value": { + "dialog": [] + } + } + } + + for dial in data["turns"]: + locale = "en" if path.find(r"/en/") != -1 else "ja" + d = {"role": dial["speaker"], + "utterance": dial["utterance"], + "slot_value_table": [], + "summary": None, + "locale": locale, + "scenario": None, + "intent": None, + "topic": None, + "answer": None} + t["dialog"].append(d) + + turns.append(t) + + new_path = os.path.join(args.output_dir, directory) + if not os.path.exists(new_path.rsplit('/', 1)[0]): + os.makedirs(new_path.rsplit('/', 1)[0]) + write_jsonl_file(turns, new_path + '_intergration.jsonl') + + + for directory in dirs["eval"]: + real_dir = 
os.path.join(args.input_dir, directory) + filenames = [os.path.join(real_dir, filename) for filename in os.listdir(real_dir)] + + turns = [] + for path in filenames: + data = read_json_file(path) + t = {"turn": "multi", + "dialog": [], + "knowledge": None, + "goal": None, + "QA": None} + + for dial in data["turns"]: + locale = "en" if path.find(r"/en/") != -1 else "ja" + d = {"role": dial["speaker"], + "utterance": dial["utterance"], + "slot_value_table": [], + "summary": None, + "locale": locale, + "scenario": None, + "intent": None, + "topic": None, + "answer": None} + t["dialog"].append(d) + + turns.append(t) + + new_path = os.path.join(args.output_dir, directory) + if not os.path.exists(new_path.rsplit('/', 1)[0]): + os.makedirs(new_path.rsplit('/', 1)[0]) + write_jsonl_file(turns, new_path + '_intergration.jsonl') + + +if __name__ == "__main__": + args = parse() + preprocess(args) + + diff --git a/src/preprocess/DSTC8.py b/src/preprocess/DSTC8.py new file mode 100644 index 0000000000000000000000000000000000000000..9268f8f9d17ce13795eb2fe1a540a54af2eeb4e5 --- /dev/null +++ b/src/preprocess/DSTC8.py @@ -0,0 +1,86 @@ +import os +from utils import parse, read_json_file, write_jsonl_file, write_json_file +from typing import List +import shutil + + +def reformat(args, domains, split): + processed_data = [] + ontology = {} + for domain in domains: + train_files = [ + filename + for filename in os.listdir(os.path.join(args.input_dir, domain)) + if split in filename + ] + ontology[domain] = {} + for filename in sorted(train_files): + filepath = os.path.join(args.input_dir, domain, filename) + data = read_json_file(filepath) + for example in data: + processed_example = { + "turn": "single", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": example["userInput"]["text"], + } + ], + } + + if "labels" in example: + processed_example["dialog"][0]["belief_state"] = [ + { + "informed_slot_value_table": [], + "domain": domain, + } + ] + for label in example["labels"]: + if "startIndex" in label["valueSpan"]: + start = label["valueSpan"]["startIndex"] + else: + start = 0 + end = label["valueSpan"]["endIndex"] + value = example["userInput"]["text"][start:end] + + ontology[domain][label["slot"]] = False + + processed_example["dialog"][0]["belief_state"][0][ + "informed_slot_value_table" + ].append( + { + "slot": label["slot"], + "values": [{"value": value}], + "relation": "=", + } + ) + else: + processed_example["dialog"][0]["belief_state"] = [] + + processed_data.append(processed_example) + + write_jsonl_file(processed_data, os.path.join(args.output_dir, f"{split}.jsonl")) + write_json_file(ontology, os.path.join(args.output_dir, f"{split}_ontology.json")) + + +def preprocess(args): + domains = ["Buses_1", "Events_1", "Homes_1", "RentalCars_1"] + reformat(args, domains, "train") + # reformat(args, domains, "dev") + reformat(args, domains, "test") + + # copy test split to dev split + shutil.copy( + os.path.join(args.output_dir, "test.jsonl"), + os.path.join(args.output_dir, "dev.jsonl"), + ) + shutil.copy( + os.path.join(args.output_dir, "test_ontology.json"), + os.path.join(args.output_dir, "dev_ontology.json"), + ) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/DailyDialog.py b/src/preprocess/DailyDialog.py new file mode 100644 index 0000000000000000000000000000000000000000..e604627800feeb2105456ffb586c19182d7f74aa --- /dev/null +++ b/src/preprocess/DailyDialog.py @@ -0,0 +1,128 @@ +from utils import ( + write_jsonl_file, + parse, +) 
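+
+# DailyDialog ships one file with all dialogue texts plus a parallel topic-label
+# file, and per-split text/act/emotion files; the integer labels in those files
+# are mapped to readable names via the lookup tables below.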
+ +import os + +topics = { + 1: "Ordinary Life", + 2: "School Life", + 3: "Culture & Education", + 4: "Attitude & Emotion", + 5: "Relationship", + 6: "Tourism", + 7: "Health", + 8: "Work", + 9: "Politics", + 10: "Finance", +} + +emotions = { + 0: "neutral", + 1: "anger", + 2: "disgust", + 3: "fear", + 4: "happiness", + 5: "sadness", + 6: "surprise", +} + +acts = {1: "inform", 2: "question", 3: "directive", 4: "commissive"} + + +def load_topics(args): + text_file = os.path.join(args.input_dir, "dialogues_text.txt") + topic_file = os.path.join(args.input_dir, "dialogues_topic.txt") + text2topic = dict() + + with open(text_file, "r", encoding="utf-8") as text_reader, open( + topic_file, "r", encoding="utf-8" + ) as topic_reader: + for line in text_reader: + text = line.strip() + topic = topics[int(topic_reader.readline().strip())] + + # if text in text2topic and text not in [ + # "Can I help you ? __eou__ I hope so . I'm looking for some material for a paper I'm writing , and I'm not quite sure where to look . __eou__ I'll certainly try to help you . What topic is your paper on ? __eou__ My paper is on the influence of television on children . __eou__ There are several possible sources you might use for that topic . I suggest you use the computer and the computer will give you a list of every scientific journal that talks about children and television . __eou__ Thank you for you help . __eou__" + # "Hey , Ann . You don't have a pen , do you ? __eou__ Sure , here you go . __eou__ Thanks . I don't suppose you have some paper , too . __eou__ Of course . There you are . __eou__ Thanks so much . I owe you one ." + # ]: + # print(text, topic, text2topic[text]) + # assert text2topic[text] == topic + + text2topic[text] = topic + + return text2topic + + +def preprocess(args, split, text2topic): + input_dir = os.path.join(args.input_dir, split) + + text_file = os.path.join(input_dir, f"dialogues_{split}.txt") + act_file = os.path.join(input_dir, f"dialogues_act_{split}.txt") + emotion_file = os.path.join(input_dir, f"dialogues_emotion_{split}.txt") + + if split == "validation": + split = "dev" + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + processed_data = [] + + with open(text_file, "r", encoding="utf-8") as text_reader, open( + act_file, "r", encoding="utf-8" + ) as act_reader, open(emotion_file, "r", encoding="utf-8") as emotion_reader: + for line in text_reader: + text = line.strip() + if text in text2topic: + topic = text2topic[text] + else: + _text = "Sam , can we stop at this bicycle shop ? __eou__ Do you want to buy a new bicycle ? __eou__ Yes , and they have a sale on now . __eou__ What happened to your old one ? __eou__ I left it at my parent's house , but I need one here as well . __eou__ I've been using Jim's old bike but he needs it back . __eou__ Let's go then . __eou__ Look at this mountain bike . It is only £ 330 . Do you like it ? __eou__ I prefer something like this one - a touring bike , but it is more expensive . __eou__ How much is it ? __eou__ The price on the tag says £ 565 but maybe you can get a discount . __eou__ OK , let's go and ask . 
__eou__" + topic = text2topic[_text] + + utterances = text.split("__eou__") + assert not utterances[-1] + utterances = utterances[:-1] + _acts = list( + map(lambda x: acts[int(x)], act_reader.readline().strip().split()) + ) + _emotions = list( + map( + lambda x: emotions[int(x)], + emotion_reader.readline().strip().split(), + ) + ) + + dialogue = { + "turn": "multi", + "locale": "en", + "domain": [topic], + "dialog": [], + "knowledge": {"type": "list", "value": sorted(emotions.values())}, + } + + assert len(utterances) == len(_acts) and len(utterances) == len( + _emotions + ), f"{utterances}\n{_acts}\n{_emotions}" + + roles = ["ROLE1", "ROLE2"] + for idx, utterance in enumerate(utterances): + assert utterance + dialogue["dialog"].append( + { + "roles": [roles[idx % 2]], + "utterance": utterance, + "active_intents": [_acts[idx]], + "emotions": [{"emotion": _emotions[idx]}], + } + ) + + processed_data.append(dialogue) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + text2topic = load_topics(args) + preprocess(args, "train", text2topic) + preprocess(args, "validation", text2topic) + preprocess(args, "test", text2topic) diff --git a/src/preprocess/DialogSum.py b/src/preprocess/DialogSum.py new file mode 100644 index 0000000000000000000000000000000000000000..e3285e7b66b429d686916c534af2203df3c44650 --- /dev/null +++ b/src/preprocess/DialogSum.py @@ -0,0 +1,81 @@ +import re +from utils import ( + read_json_file, + read_jsonl_file, + write_json_file, + write_jsonl_file, + parse, +) + + +def preprocess_for_train_and_dev(args, file): + path = args.input_dir + "/" + file + data = read_jsonl_file(path) + + turns = [] + for i in range(len(data)): + t = { + "turn": "multi", + "domain": [data[i]["topic"]], + "locale": "en", + "dialog": [], + "summary": "", + } + dialogues = re.split("\\n|\\r\\n", data[i]["dialogue"]) + for dialog in dialogues: + ro = re.search("(#.+#): ", dialog) + ru = re.split("#:", dialog) + + if ro.group(1) is None: + print(dialog) + exit() + d = {"roles": [ro.group(1)], "utterance": ru[1].strip()} + t["dialog"].append(d) + t["summary"] = data[i]["summary"] + + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file) + + +def preprocess_for_test(args, file): + path = args.input_dir + "/" + file + data = read_jsonl_file(path) + + turns = [] + for i in range(len(data)): + t = { + "turn": "multi", + "domain": [data[i]["topic1"], data[i]["topic2"], data[i]["topic3"]], + "locale": "en", + "dialog": [], + "summary": "", + } + dialogues = re.split("\\n|\\r\\n", data[i]["dialogue"]) + for dialog in dialogues: + ro = re.search("(#.+#):", dialog) + ru = re.split("#:", dialog) + + if ro is None: + print(dialog) + exit() + d = {"roles": [ro.group(1)], "utterance": ru[1].strip()} + t["dialog"].append(d) + t["summary"] = "__multi_ref_sep__".join( + [data[i]["summary1"], data[i]["summary2"], data[i]["summary3"]] + ) + + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file) + + +def preprocess(args): + preprocess_for_train_and_dev(args, "dialogsum.train.jsonl") + preprocess_for_train_and_dev(args, "dialogsum.dev.jsonl") + preprocess_for_test(args, "dialogsum.test.jsonl") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/DoQA.py b/src/preprocess/DoQA.py new file mode 100644 index 0000000000000000000000000000000000000000..01bab8f565274997a945dbb948af12a991c29543 --- /dev/null +++ b/src/preprocess/DoQA.py @@ -0,0 +1,54 @@ +from utils import read_json_file, 
write_jsonl_file, parse +import os + + +def preprocess(args, split): + path = os.path.join(args.input_dir, f"doqa-cooking-{split}-v2.1.json") + data = read_json_file(path) + + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + data = data["data"] + + turns = [] + for i in range(len(data)): + title = data[i]["title"] + background = data[i]["background"] + + for example in data[i]["paragraphs"]: + passage = example["context"] + if passage.endswith("CANNOTANSWER"): + passage = passage[: -len("CANNOTANSWER")].strip() + knowledge = { + "type": "dict", + "value": {"title": title, "background": background, "passage": passage}, + } + + t = {"turn": "multi", "locale": "en", "dialog": [], "knowledge": knowledge} + + for qa in example["qas"]: + t["dialog"].append({"roles": ["USER"], "utterance": qa["question"]}) + + assert split != "train" or len(qa["answers"]) == 1 + + for answer in qa["answers"]: + text = ( + answer["input_text"] + if "input_text" in answer + else answer["text"] + ) + if text == "CANNOTANSWER": + text = "None" + + t["dialog"].append({"roles": ["SYSTEM"], "utterance": text}) + + turns.append(t) + + write_jsonl_file(turns, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + preprocess(args, "test") diff --git a/src/preprocess/E2E.py b/src/preprocess/E2E.py new file mode 100644 index 0000000000000000000000000000000000000000..4e2df7575c78261a406adce82921a5912c79e4a7 --- /dev/null +++ b/src/preprocess/E2E.py @@ -0,0 +1,62 @@ +import os +from utils import read_csv_file, parse, write_jsonl_file + + +def parse_knowledge(mr): + svt = mr.split(",") + knowledge = dict() + + for svp in svt: + slot, value = svp.strip().split("[") + + assert value[-1] == "]" + slot = slot.strip() + value = value[:-1].strip() + + knowledge[slot] = value + + return {"type": "dict", "value": knowledge} + + +def preprocess(args, split): + if split != "test": + infile = os.path.join(args.input_dir, f"{split}set.csv") + else: + infile = os.path.join(args.input_dir, "testset_w_refs.csv") + + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + processed_data = [] + data = read_csv_file(infile) + mrs = data["mr"].values + refs = data["refs"] = data["ref"].values + + mr2refs = dict() + + for mr, ref in zip(mrs, refs): + if mr not in mr2refs: + mr2refs[mr] = [] + mr2refs[mr].append(ref) + + for mr in mr2refs: + refs = mr2refs[mr] + dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": parse_knowledge(mr), + } + + for ref in refs: + dial["dialog"].append({"roles": ["SYSTEM"], "utterance": ref.strip()}) + + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + preprocess(args, "test") diff --git a/src/preprocess/ENLP.py b/src/preprocess/ENLP.py new file mode 100644 index 0000000000000000000000000000000000000000..de4b8b9f1a6f0a215e3c2bcf0890c220aa4fc6bc --- /dev/null +++ b/src/preprocess/ENLP.py @@ -0,0 +1,154 @@ +from utils import parse, read_json_file, write_jsonl_file +import os + +def get_prefix_lengths(lengths): + prefix_lengths = [0] + for length in lengths: + prefix_lengths.append(prefix_lengths[-1] + length) + + return prefix_lengths + + +def custom_join(tokens, start, end): + joined_str = "" + for i in range(start, end): + joined_str += tokens[i] + if ( + i == end - 1 + or tokens[i + 1] in ["-", ","] + or tokens[i + 1].startswith("'") + or tokens[i] == "-" + ): + continue + joined_str += " 
" + + if joined_str.count('"') > 1: + start = joined_str.index('"') + end = joined_str[start + 1 :].index('"') + joined_str = ( + joined_str[: start + 1] + + joined_str[start + 1 :][:end].strip() + + joined_str[start + 1 :][end:] + ) + + return joined_str + + +def parse_characters(utterance, tokens, character_entities): + utterance_char_idx = 0 + characters = [] + for sent_idx, sent in enumerate(character_entities): + prefix_lengths = get_prefix_lengths(map(len, tokens[sent_idx])) + for character in sent: + span = ", ".join(character[2:]) + scan_length = prefix_lengths[character[0]] + start = utterance_char_idx + + while scan_length >= 0 and start < len(utterance): + if scan_length == 0 and utterance[start] != " ": + break + + if utterance[start] != " ": + scan_length -= 1 + + start += 1 + + scan_length = prefix_lengths[character[1]] - prefix_lengths[character[0]] + end = start + + while scan_length > 0 and end < len(utterance): + if utterance[end] != " ": + scan_length -= 1 + + end += 1 + + characters.append({"value": span, "start": start, "end": end}) + # print(utterance) + # print(tokens) + # print(character_entities) + # print( + # utterance_char_idx, + # start, + # end, + # utterance[start:end], + # tokens[sent_idx][character[0] : character[1]], + # custom_join(tokens[sent_idx], character[0], character[1]), + # ) + + if utterance[start:end] == "Emily- noooo": + continue + + assert utterance[start:end] == custom_join( + tokens[sent_idx], character[0], character[1] + ) + + # update utterance char idx + scan_length = prefix_lengths[-1] + while scan_length >= 0 and utterance_char_idx < len(utterance): + if scan_length == 0 and utterance[utterance_char_idx] != " ": + break + + if utterance[utterance_char_idx] != " ": + scan_length -= 1 + + utterance_char_idx += 1 + + return characters + + +def preprocess(args, split): + input_file = os.path.join(args.input_dir, f"character-identification-{split}.json") + + if split == "trn": + split = "train" + elif split == "tst": + split = "test" + + output_file = os.path.join(args.output_dir, f"{split}.jsonl") + + episodes = read_json_file(input_file)["episodes"] + + processed_data = [] + for episode in episodes: + scenes = episode["scenes"] + for scene in scenes: + utterances = scene["utterances"] + dialog = { + "turn": "multi", + "locale": "en", + "dialog": [], + } + + roles = ["#GENERAL#", "#OTHER#", "#ALL#"] + # labels = set() + for example in utterances: + utterance = example["transcript"] + tokens = example["tokens"] + character_entities = example["character_entities"] + + characters = parse_characters(utterance, tokens, character_entities) + dialog["dialog"].append( + { + "roles": example["speakers"], + "utterance": utterance, + "characters": characters, + } + ) + + roles += example["speakers"] + + # for character in characters: + # labels.add(character["value"]) + + # assert not len(labels - set(roles)), f"\n{sorted(labels)} \n {sorted(set(roles))}" + dialog["knowledge"] = {"type": "lsit", "value": sorted(roles)} + processed_data.append(dialog) + + write_jsonl_file(processed_data, output_file) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "trn") + preprocess(args, "dev") + preprocess(args, "tst") diff --git a/src/preprocess/EmoryNLP.py b/src/preprocess/EmoryNLP.py new file mode 100644 index 0000000000000000000000000000000000000000..1e047aff7997c32e6339ca2d7d5a85710ed81a0a --- /dev/null +++ b/src/preprocess/EmoryNLP.py @@ -0,0 +1,47 @@ +import re +from utils import read_json_file, read_jsonl_file, write_json_file, 
write_jsonl_file, read_txt_file, read_csv_file, parse + + +def reformat(args, file): + path = args.input_dir + "/" + file + ".csv" + data = read_csv_file(path) + turns = [] + for i in range(1, len(data)): + ds = data[i] + t = { + "turn": "single", + "locale": "en", + "dialog": [] + } + d = {"role": ds[1].strip("\[\]\'"), + "utterance": ds[0], + "belief_state": [{"domain": None, + "goal": [{"intent": None, + "slot_value_table": [{"slot": 'Emotion', "value": ds[2]}, + {"slot": 'Sentiment', "value": ds[4]} + ]} + ]} + ], + "querying_result": [], + "summary": None, + "locale": None, + "topic" : None, + "opinions": None, + "answer": None} + t["dialog"].append(d) + + t["knowledge"] = None + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess(args): + reformat(args, "emorynlp_train_final") + reformat(args, "emorynlp_dev_final") + reformat(args, "emorynlp_test_final") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/EmpathicDialogue.py b/src/preprocess/EmpathicDialogue.py new file mode 100644 index 0000000000000000000000000000000000000000..374750b3d836879294bd92a0bef2585e03fd0074 --- /dev/null +++ b/src/preprocess/EmpathicDialogue.py @@ -0,0 +1,78 @@ +import os +from utils import write_jsonl_file, parse + + +def reformat(args, split): + infile = os.path.join(args.input_dir, f"{split}.csv") + + if split == "valid": + split = "dev" + + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + processed_data = [] + conv_id = None + dialogue = {"turn": "multi", "locale": "en", "dialog": []} + roles = ["Speaker", "Listener"] + turn_idx = -1 + + with open(infile, "r", encoding="UTF-8") as reader: + # skip the first header line + reader.readline() + for line in reader: + if not line.strip(): + continue + + parts = line.strip().split(",") + + if conv_id is not None and parts[0] == conv_id: + turn_idx += 1 + dialogue["dialog"].append( + { + "roles": [roles[turn_idx % 2]], + "utterance": parts[5].replace("_comma_", ","), + } + ) + # next dialogue + else: + # save previous dialogue + if conv_id is not None: + processed_data.append(dialogue) + + # current dialogue + turn_idx = 0 + conv_id = parts[0] + + dialogue = { + "turn": "multi", + "locale": "en", + "dialog": [ + { + "roles": [roles[turn_idx % 2]], + "utterance": parts[5].replace("_comma_", ","), + } + ], + "knowledge": { + "type": "dict", + "value": { + "emotion": parts[2], + "passage": parts[3].replace("_comma_", ","), + }, + }, + } + + if conv_id is not None: + processed_data.append(dialogue) + + write_jsonl_file(processed_data, outfile) + + +def preprocess(args): + reformat(args, "train") + reformat(args, "valid") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/FriendsQA.py b/src/preprocess/FriendsQA.py new file mode 100644 index 0000000000000000000000000000000000000000..b3f07817237358cc591d6ce17fb88f1ce1508620 --- /dev/null +++ b/src/preprocess/FriendsQA.py @@ -0,0 +1,63 @@ +import os +from utils import read_json_file, parse, write_jsonl_file + + +def parse_dialogue_knowledge(utterances): + dialogue = [] + + for utter in utterances: + dialogue.append({"roles": utter["speakers"], "utterance": utter["utterance"]}) + + knowledge = {"type": "dict", "value": {"dialogue": dialogue}} + + return knowledge + + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"friendsqa_{split}.json") + + if split == "trn": + split = "train" + elif split == "tst": + split = "test" 
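+
+    # The answer span fields (inner_start / inner_end) and the pointer to the
+    # source utterance (utterance_id) are copied through unchanged as
+    # start / end / dialog_turn on each SYSTEM turn below.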
+ + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + data = read_json_file(infile)["data"] + processed_data = [] + + for part in data: + for example in part["paragraphs"]: + assert "utterances:" in example, example.keys() + knowledge = parse_dialogue_knowledge(example["utterances:"]) + + for qa in example["qas"]: + dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": knowledge, + } + + dial["dialog"].append({"roles": ["USER"], "utterance": qa["question"]}) + + for answer in qa["answers"]: + dial["dialog"].append( + { + "roles": ["SYSTEM"], + "utterance": answer["answer_text"], + "start": answer["inner_start"], + "end": answer["inner_end"], + "dialog_turn": answer["utterance_id"], + } + ) + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "trn") + preprocess(args, "dev") + preprocess(args, "tst") diff --git a/src/preprocess/GlobalWoZ.py b/src/preprocess/GlobalWoZ.py new file mode 100644 index 0000000000000000000000000000000000000000..f15e82ad7054cb333981b08e46fcac2f50cfb281 --- /dev/null +++ b/src/preprocess/GlobalWoZ.py @@ -0,0 +1,116 @@ +import os +from utils import read_json_file, write_jsonl_file, parse + + +def get_belief_state(metadata): + bs = [] + if not len(metadata): + return bs + + for domain_name, domain_content in metadata.items(): + blf_stt = {"domain": domain_name, + "goal": []} + + if len(domain_content) > 0: + for intent_name, intent_content in domain_content.items(): + goal = {"intent": intent_name, + "slot_value_table": []} + + if isinstance(intent_content, list): + for item in intent_content: + svt = {"slot": intent_name, + "value": item} + goal["slot_value_table"].append(svt) + + else: + for slot, value in intent_content.items(): + if isinstance(value, (list, dict)) and len(value) == 0: # vacant + real_value = None + elif isinstance(value, (list, dict)) and len(value) == 1: # list or diction with one value + real_value = value[0] + elif isinstance(value, (str, bool)): # string or bool + real_value = value + + svt = {"slot": slot, + "value": real_value} + goal["slot_value_table"].append(svt) + + blf_stt["goal"].append(goal) + + bs.append(blf_stt) + + return bs + +def get_slot_value_table(intent): + slot_value_table = [] + if not len(intent): + return slot_value_table + for item in intent: + svt = {"slot": item[0], + "value": item[1]} + slot_value_table.append(svt) + + return slot_value_table + + +def preprocess(args): + filenames = os.listdir(args.input_dir) + for filename in filenames: + ''' + add train/eval/test instruction + ''' + path = os.path.join(args.input_dir, filename) + data = read_json_file(path) + locale = filename.rsplit('.', 1)[0][-2:] + + turns = [] + for file_name, item in data.items(): + print(file_name) + t = {"turn": "multi", + "dialog": [], + "goal": get_belief_state(item["goal"]), + "knowledge": None} + turn = 1 + + for obj in item["log"]: + role = "USER" if turn % 2 else "SYS" + d = {"role": role, + "utterance": obj["text"], + "belief_state": get_belief_state(obj["metadata"]), + "querying_result": None, + "summary": None, + "locale": locale, + "topic": None, + "opinion": None, + "answer": None} + + if "dialog_act" in obj: + blf_stt = {"domain": "dialog_act", + "goal": []} + if len(obj["dialog_act"]) > 0: + for intent_name, intent_content in obj["dialog_act"].items(): + goal = {"intent": intent_name, + "slot_value_table": get_slot_value_table(intent_content)} + blf_stt["goal"].append(goal) + 
d["belief_state"].append(blf_stt) + + if "span_info" in obj: + blf_stt = {"domain": "span_info", + "goal": []} + if len(obj["span_info"]) > 0: + for elem in obj["span_info"]: + goal = {"intent": elem[0], + "slot_value_table": [{"slot": elem[1], "value": elem[2]}]} + blf_stt["goal"].append(goal) + d["belief_state"].append(blf_stt) + + t["dialog"].append(d) + turn += 1 + + turns.append(t) + + write_jsonl_file(turns, os.path.join(args.output_dir, filename[:-5]) + ".jsonl") + +if __name__ == "__main__": + args = parse() + preprocess(args) \ No newline at end of file diff --git a/src/preprocess/GoEmotions.py b/src/preprocess/GoEmotions.py new file mode 100644 index 0000000000000000000000000000000000000000..bbb3d02b6d7ffd3e650fdb7485f32e358794c08c --- /dev/null +++ b/src/preprocess/GoEmotions.py @@ -0,0 +1,70 @@ +import os +from utils import read_json_file, parse, write_jsonl_file +import pandas as pd + + +idx2emotions = [ + "admiration", + "amusement", + "anger", + "annoyance", + "approval", + "caring", + "confusion", + "curiosity", + "desire", + "disappointment", + "disapproval", + "disgust", + "embarrassment", + "excitement", + "fear", + "gratitude", + "grief", + "joy", + "love", + "nervousness", + "optimism", + "pride", + "realization", + "relief", + "remorse", + "sadness", + "surprise", +] + + +def preprocess(args, split): + output_file = os.path.join(args.output_dir, f"{split}.jsonl") + if split == "dev": + split = "valid" + + input_file = os.path.join(args.input_dir, f"{split}.csv") + + data = pd.read_csv(input_file) + + utterances = data["text"].values + emotion_ids = data["label"].values + + processed_data = [] + for index, utterance in enumerate(utterances): + dialogue = { + "turn": "single", + "locale": "en", + "dialog": [{"roles": ["USER"], "utterance": utterance, "emotions": []}], + } + + dialogue["dialog"][-1]["emotions"].append( + {"emotion": idx2emotions[emotion_ids[index]]} + ) + + processed_data.append(dialogue) + + write_jsonl_file(processed_data, output_file) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + preprocess(args, "test") diff --git a/src/preprocess/Google_Simulated_Dialogue.py b/src/preprocess/Google_Simulated_Dialogue.py new file mode 100644 index 0000000000000000000000000000000000000000..0f04b47cc1795fa08076fcd8fbc17fd9d4e7b2d1 --- /dev/null +++ b/src/preprocess/Google_Simulated_Dialogue.py @@ -0,0 +1,180 @@ +from utils import read_json_file, write_jsonl_file, parse, write_json_file +import os + + +def get_aligned_index(text, non_space_len, include_tail_space=True): + cursor = 0 + while non_space_len >= 0 and cursor < len(text): + if not include_tail_space and non_space_len == 0: + break + if include_tail_space and non_space_len == 0 and text[cursor] != " ": + break + + if text[cursor] != " ": + non_space_len -= 1 + + cursor += 1 + + return cursor + + +def parse_slot2index(slots, text, tokens): + slot2index = dict() + prefix_length = [0] + for token in tokens: + prefix_length.append(prefix_length[-1] + len(token)) + + for trip in slots: + start = get_aligned_index(text, prefix_length[trip["start"]]) + end = get_aligned_index(text, prefix_length[trip["exclusive_end"]], False) + if trip["slot"] not in slot2index: + slot2index[trip["slot"]] = dict() + + value = text[start:end] + # if text not in [ + # "there are none available for 8 pm , but there is one at 6.15 pm . would you like 6.15 pm instead ?", + # "8 pm is unavailable at 8 pm has been declined by oren hummus . 
you can choose another time such as 7.15 pm , 7.30 pm , or 8.30 pm .", + # "8 pm is not available on wednesday . i was unable to book your table on wednesday night at cetrella for 3 people .", + # ]: + # assert value not in slot2index[trip["slot"]], value + if value not in slot2index[trip["slot"]]: + slot2index[trip["slot"]][value] = (start, end) + return slot2index + + +def get_slot_index(slot2index, value, slot): + if slot not in slot2index or value not in slot2index[slot]: + # print(slot, value) + return -1, -1 + return slot2index[slot][value] + + +def parse_dialogue_acts(acts, utter_dict, domain): + dialog_acts = [] + text = utter_dict["text"] + # print(utter_dict) + # print(acts) + # print() + slot2index = parse_slot2index(utter_dict["slots"], text, utter_dict["tokens"]) + + act_slot_values = dict() + for act in acts: + if act["type"] not in act_slot_values: + act_slot_values[act["type"]] = dict() + + if "slot" in act: + if act["slot"] not in act_slot_values[act["type"]]: + act_slot_values[act["type"]][act["slot"]] = [] + + if "value" in act: + act_slot_values[act["type"]][act["slot"]].append(act["value"]) + + for act in act_slot_values: + svt = [] + for slot, values in act_slot_values[act].items(): + svp = { + "slot": slot, + } + + if values: + svp["values"] = [] + svp["relation"] = "=" + for value in values: + start, end = get_slot_index(slot2index, value, slot) + if start != -1: + assert value == text[start:end], f"{value} {text[start: end]}" + svp["values"].append( + {"value": value, "start": start, "end": end} + ) + else: + svp["values"].append({"value": value}) + svt.append(svp) + + dialog_acts.append({"act": act, "slot_value_table": svt, "domain": domain}) + + return dialog_acts + + +def parse_dialogue_state(state, intent, domain, schema): + svt = [] + + for pair in state: + svt.append( + { + "slot": pair["slot"], + "values": [{"value": pair["value"]}], + "relation": "=", + } + ) + + schema[domain].add(pair["slot"]) + + dialog_state = [ + {"intent": intent, "informed_slot_value_table": svt, "domain": domain} + ] + + return dialog_state, schema + + +def reformat(args, file, domain): + path = args.input_dir + "/" + file + ".json" + data = read_json_file(path) + processed_data = [] + + schema = {domain: set()} + + for origin_dial in data: + dialog = {"turn": "multi", "locale": "en", "dialog": []} + origin_turns = origin_dial["turns"] + intent = origin_turns[0]["user_intents"] + + assert len(intent) == 1 + intent = intent[0] + + for origin_turn in origin_turns: + # system + if "system_utterance" in origin_turn: + new_turn = { + "roles": ["SYSTEM"], + "utterance": origin_turn["system_utterance"]["text"], + "dialog_acts": parse_dialogue_acts( + origin_turn["system_acts"], + origin_turn["system_utterance"], + domain, + ), + } + dialog["dialog"].append(new_turn) + + # user + bs, schema = parse_dialogue_state( + origin_turn["dialogue_state"], intent, domain, schema + ) + new_turn = { + "roles": ["USER"], + "utterance": origin_turn["user_utterance"]["text"], + "dialog_acts": parse_dialogue_acts( + origin_turn["user_acts"], origin_turn["user_utterance"], domain + ), + "belief_state": bs, + } + dialog["dialog"].append(new_turn) + processed_data.append(dialog) + + write_jsonl_file(processed_data, args.output_dir + "/" + file + ".jsonl") + + schema[domain] = sorted(list(schema[domain])) + + ontology = {domain: {slot: True for slot in schema[domain]}} + + write_json_file(ontology, os.path.join(args.output_dir, f"{file}_ontology.json")) + + +def preprocess(args, domain): + reformat(args, 
"train", domain) + reformat(args, "dev", domain) + reformat(args, "test", domain) + + +if __name__ == "__main__": + args = parse() + preprocess(args, args.domain) diff --git a/src/preprocess/HWU64.py b/src/preprocess/HWU64.py new file mode 100644 index 0000000000000000000000000000000000000000..ab58869e4d0381fe6db53aa032469cb1fe98c524 --- /dev/null +++ b/src/preprocess/HWU64.py @@ -0,0 +1,73 @@ +from utils import write_jsonl_file, read_csv_file, parse +import os +import pandas as pd + +# Refer https://github.com/alexa/dialoglue/blob/master/data_utils/intent_scripts/get_hwu_data.py + +LIST_OF_FILES = ( + "alarm_query.csv\nalarm_remove.csv\nalarm_set.csv\naudio_volum" + "e_down.csv\naudio_volume_mute.csv\naudio_volume_up.csv\ncalend" + "ar_query.csv\t\ncalendar_remove.csv\t\ncalendar_set.csv\t\ncoo" + "king_recipe.csv\t\ndatetime_convert.csv\t\ndatetime_query.csv" + "\t\nemail_addcontact.csv\t\nemail_query.csv\t\nemail_querycon" + "tact.csv\t\nemail_sendemail.csv\t\ngeneral_affirm.csv\t\ngener" + "al_commandstop.csv\t\ngeneral_confirm.csv\t\ngeneral_dontcare." + "csv\t\ngeneral_explain.csv\t\ngeneral_joke.csv\t\ngeneral_neg" + "ate.csv\t\ngeneral_praise.csv\t\ngeneral_quirky.csv\t\ngenera" + "l_repeat.csv\t\niot_cleaning.csv\t\niot_coffee.csv\t\niot_hue" + "_lightchange.csv\t\niot_hue_lightdim.csv\t\niot_hue_lightoff." + "csv\t\niot_hue_lighton.csv\t\niot_hue_lightup.csv\t\niot_wemo_" + "off.csv\t\niot_wemo_on.csv\t\nlists_createoradd.csv\t\nlists_" + "query.csv\t\nlists_remove.csv\t\nmusic_likeness.csv\t\nmusic_q" + "uery.csv\t\nmusic_settings.csv\t\nnews_query.csv\t\nplay_audio" + "book.csv\t\nplay_game.csv\t\nplay_music.csv\t\nplay_podcasts." + "csv\t\nplay_radio.csv\t\nqa_currency.csv\t\nqa_definition.csv" + "\t\nqa_factoid.csv\t\nqa_maths.csv\t\nqa_stock.csv\t\nrecomme" + "ndation_events.csv\t\nrecommendation_locations.csv\t\nrecomme" + "ndation_movies.csv\t\nsocial_post.csv\t\nsocial_query.csv\t\n" + "takeaway_order.csv\t\ntakeaway_query.csv\t\ntransport_query.c" + "sv\t\ntransport_taxi.csv\t\ntransport_ticket.csv\t\ntransport" + "_traffic.csv\t\nweather_query.csv\t".split() +) + + +def reformat(args, split): + if split == "train": + input_dir = os.path.join(args.input_dir, "trainset") + else: + input_dir = os.path.join(args.input_dir, "testset", "csv") + + dialogues = [] + for filename in LIST_OF_FILES: + data = pd.read_csv(os.path.join(input_dir, filename), sep=";") + for i in range(len(data)): + utterance = data.iloc[i]["answer_from_anno"] + domain = data.iloc[i]["scenario"] + intent = data.iloc[i]["intent"] + + dialogues.append( + { + "turn": "single", + "locale": "en", + "domain": domain, + "dialog": [ + { + "roles": ["USER"], + "utterance": utterance, + "active_intents": [f"{domain} {intent}"], + } + ], + } + ) + + write_jsonl_file(dialogues, os.path.join(args.output_dir, f"{split}.jsonl")) + + +def preprocess(args): + reformat(args, "train") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/IEMOCAP.py b/src/preprocess/IEMOCAP.py new file mode 100644 index 0000000000000000000000000000000000000000..4ee11d44055d1f061bb6c0e3dd4b2b14445984e2 --- /dev/null +++ b/src/preprocess/IEMOCAP.py @@ -0,0 +1,43 @@ +from utils import parse, write_jsonl_file, read_json_file +import os + +label2emotion = { + "fru": "frustrated", + "ang": "angry", + "neu": "neutral", + "exc": "excited", + "hap": "happy", + "sad": "sad", +} + + +def preprocess(args, split): + input_dir = os.path.join(args.input_dir, f"{split}_data.json") + + data 
= read_json_file(input_dir) + dialogues = [] + + cnt = 0 + + for dialogue in data: + dial = {"turn": "multi", "locale": "en", "dialog": []} + + for turn in dialogue: + _turn = { + "roles": [turn["speaker"]], + "utterance": turn["text"], + } + if "label" in turn: + _turn["emotions"] = [{"emotion": label2emotion[turn["label"]]}] + cnt += 1 + dial["dialog"].append(_turn) + + dialogues.append(dial) + write_jsonl_file(dialogues, os.path.join(args.output_dir, f"{split}.jsonl")) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + preprocess(args, "test") diff --git a/src/preprocess/Incar.py b/src/preprocess/Incar.py new file mode 100644 index 0000000000000000000000000000000000000000..3b97d1edda4838e9001e32801228c087dc5ee4a4 --- /dev/null +++ b/src/preprocess/Incar.py @@ -0,0 +1,8 @@ +from Soccer import preprocess +from utils import parse + +if __name__ == "__main__": + args = parse() + preprocess(args, "train", "incar") + preprocess(args, "val", "incar") + preprocess(args, "test", "incar") diff --git a/src/preprocess/MAMS-ACSA.py b/src/preprocess/MAMS-ACSA.py new file mode 100644 index 0000000000000000000000000000000000000000..add1f6ab9d00e4e81f057fdc5d5950a999f491e8 --- /dev/null +++ b/src/preprocess/MAMS-ACSA.py @@ -0,0 +1,44 @@ +from utils import parse, write_jsonl_file +import xml.etree.ElementTree as ET +import os + + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"{split}.xml") + + if split == "val": + split = "dev" + + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + tree = ET.parse(infile) + processed_data = [] + + for sentence in tree.getroot(): + processed_data.append( + { + "turn": "single", + "locale": "en", + "dialog": [ + {"roles": ["USER"], "utterance": sentence[0].text, "aspects": []} + ], + } + ) + + for aspect in sentence[1]: + processed_data[-1]["dialog"][-1]["aspects"].append( + { + "category": aspect.attrib["category"], + "sentiment": aspect.attrib["polarity"], + } + ) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + + preprocess(args, "train") + preprocess(args, "val") + preprocess(args, "test") diff --git a/src/preprocess/MAMS-ATSA.py b/src/preprocess/MAMS-ATSA.py new file mode 100644 index 0000000000000000000000000000000000000000..88199901a52c3a9b32ca6e3eeeba27095d770343 --- /dev/null +++ b/src/preprocess/MAMS-ATSA.py @@ -0,0 +1,48 @@ +from utils import parse, write_jsonl_file +import xml.etree.ElementTree as ET +import os + + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"{split}.xml") + + if split == "val": + split = "dev" + + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + tree = ET.parse(infile) + processed_data = [] + + for sentence in tree.getroot(): + processed_data.append( + { + "turn": "single", + "locale": "en", + "dialog": [ + {"roles": ["USER"], "utterance": sentence[0].text, "aspects": []} + ], + } + ) + + for aspect in sentence[1]: + processed_data[-1]["dialog"][-1]["aspects"].append( + { + "target": { + "value": aspect.attrib["term"], + "start": int(aspect.attrib["from"]), + "end": int(aspect.attrib["to"]), + }, + "sentiment": aspect.attrib["polarity"], + } + ) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + + preprocess(args, "train") + preprocess(args, "val") + preprocess(args, "test") diff --git a/src/preprocess/MASSIVE.py b/src/preprocess/MASSIVE.py new file mode 100644 index 
0000000000000000000000000000000000000000..1584f63209b74db53ed8d82457d4f234d9ead8bc --- /dev/null +++ b/src/preprocess/MASSIVE.py @@ -0,0 +1,163 @@ +import re +import os +from utils import write_jsonl_file, parse +from utils import read_jsonl_file + + +def readfile(input_dir, filename): + path = os.path.join(input_dir, filename) + data = read_jsonl_file(path) + return data + + +def is_space_language(language): + if language in ["zh-CN", "ja-JP", "zh-TW"]: + return False + return True + + +def get_slot_value_table(utterance, language, origin): + svt = [] + pattern = re.compile("\[(.*?)\]", re.S) + svp_iter = re.finditer(pattern, utterance) + + delta_length = 0 # record the delta length caused by annotation + prev_end = -1 + + for svp in svp_iter: + start = svp.start() + end = svp.end() + annotaed = utterance[start + 1 : end - 1] + slot, value = map(lambda x: x.strip(), annotaed.split(":")) + + origin_start = start + is_space = is_space_language(language) + # offset 1 for non-space-seperated language + if ( + not is_space + and start > 0 + and utterance[start - 1] == " " + and prev_end + 1 != start + ): + origin_start -= 1 + + # offset the delta length + origin_start -= delta_length + origin_end = origin_start + len(value) + + if not is_space: + if origin[origin_start:origin_end] != value: + for delta_offset in range(-3, 4): + if ( + origin[delta_offset + origin_start : delta_offset + origin_end] + == value + ): + origin_start += delta_offset + origin_end += delta_offset + + break + + # update delta length + if ( + origin_end < len(origin) + and end < len(utterance) + and origin[origin_end] == utterance[end] + ): + delta_length = end - origin_end + + else: + delta_length = end - origin_end + 1 + + else: + # update delta length + cur_delta_length = end - start - len(value) + + if not is_space: + # head space + if start > 0 and utterance[start - 1] == " ": + cur_delta_length += 1 + + if end < len(utterance) and utterance[end] == " ": + cur_delta_length += 1 + + delta_length += cur_delta_length + + assert origin[origin_start:origin_end] == value + + svt.append( + { + "slot": slot, + "value": value, + "start": origin_start, + "end": origin_end, + "relation": "equal_to", + } + ) + + prev_end = end + + return svt + + +def preprocess(args): + filenames = os.listdir(args.input_dir) + data = {"train": [], "dev": [], "test": [], "MMNLU-22": []} + + total = 0 + for _ in filenames: + total += 1 + + cur = 0 + + for filename in filenames: + cur += 1 + print(f"preprocessing {filename} ({cur}/{total})") + origin_data = readfile(args.input_dir, filename) + for line in origin_data: + partition = line["partition"] + + turn = dict() + turn["role"] = "ROLE" + turn["utterance"] = line["utt"] + + domain = None + if "annot_utt" in line: + domain = [line["scenario"]] + bs = [] + goal = dict() + goal["intent"] = line["intent"] # format: {domain}_{intent} + slot_value_table = get_slot_value_table( + line["annot_utt"], line["locale"], turn["utterance"] + ) + goal["slot_value_table"] = slot_value_table + goal["active_intent"] = line["intent"] + bs.append(goal) + + turn["belief_state"] = [bs] + else: + turn["belief_state"] = [] + + if domain is None: + data[partition].append( + {"turn": "single", "locale": line["locale"], "dialog": [turn]} + ) + else: + data[partition].append( + { + "turn": "single", + "locale": line["locale"], + "dialog": [turn], + "domain": domain, + } + ) + + for partition in data: + if data[partition]: + write_jsonl_file( + data[partition], os.path.join(args.output_dir, f"{partition}.jsonl") + ) + + 
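+# Illustrative example (added note, not part of the original script): MASSIVE's
+# "annot_utt" marks slots inline, e.g. "wake me up at [time : nine am]" for the
+# plain utterance "wake me up at nine am". For that pair, get_slot_value_table()
+# would return [{"slot": "time", "value": "nine am", "start": 14, "end": 21,
+# "relation": "equal_to"}], i.e. character offsets into the plain utterance.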
+if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/MELD.py b/src/preprocess/MELD.py new file mode 100644 index 0000000000000000000000000000000000000000..b96dc3cb370b2c1703ecff73a5f87c1ec403bb96 --- /dev/null +++ b/src/preprocess/MELD.py @@ -0,0 +1,42 @@ +import os +from utils import write_jsonl_file, read_csv_file, parse + + +def get_knowledge(input_dir): + path = os.path.join(input_dir, "train_sent_emo.csv") + data = read_csv_file(path) + emotions = set() + for i in range(1, len(data)): + emotions.add(data.iloc[i][3]) + + return sorted(emotions) + + +def reformat(args, file): + path = os.path.join(args.input_dir, f"{file}_sent_emo.csv") + data = read_csv_file(path) + turns = [] + for i in range(len(data)): + ds = data.iloc[i] + t = { + "turn": "single", + "locale": "en", + "dialog": [], + } + d = { + "roles": [ds[2]], + "utterance": ds[1], + "emotions": [{"emotion": ds[3], "sentiment": ds[4]}], + } + t["dialog"].append(d) + + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +if __name__ == "__main__": + args = parse() + reformat(args, "train") + reformat(args, "dev") + reformat(args, "test") diff --git a/src/preprocess/MInDS-14.py b/src/preprocess/MInDS-14.py new file mode 100644 index 0000000000000000000000000000000000000000..d8db3686205a9d900138e9a137908d08dc255c65 --- /dev/null +++ b/src/preprocess/MInDS-14.py @@ -0,0 +1,55 @@ +import os + +# import pandas as pd +from utils import read_csv_file, write_jsonl_file, parse + +# new read_in function +# def read_csv_file(filename): +# with open(filename, "r", encoding="utf8") as fr: +# return pd.read_csv(fr) + + +def preprocess(args): + filenames = os.listdir(args.input_dir) + for filename in filenames: + """ + add train/eval/test instruction + """ + + if not filename.endswith(".csv"): + continue + + path = os.path.join(args.input_dir, filename) + data = read_csv_file(path) + turns = [] + locale = filename[:5] if filename != "aux-en.csv" else "en-US" + + for i in data.index: + t = {"turn": "single", "locale": locale, "dialog": []} + item = data.iloc[i] + d = { + "role": "ROLE", + "utterance": item["text_asr"] if filename != "aux-en.csv" else item[0], + "active_intents": [ + item["intent"] if filename != "aux-en.csv" else item[1] + ], + } + t["dialog"].append(d) + turns.append(t) + + if locale != "en-US": + t_en = {"turn": "single", "locale": "en-US", "dialog": []} + d = { + "role": "ROLE", + "utterance": item["text_translated"], + "active_intents": [item["intent"]], + } + t_en["dialog"].append(d) + turns.append(t_en) + + write_jsonl_file(turns, args.output_dir + "/" + filename[:5] + ".jsonl") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/MKQA_integrated.py b/src/preprocess/MKQA_integrated.py new file mode 100644 index 0000000000000000000000000000000000000000..50b80a9a5ac6a4f4d453b01c83175ec5aad016f7 --- /dev/null +++ b/src/preprocess/MKQA_integrated.py @@ -0,0 +1,45 @@ +import os +from utils import read_jsonl_file, write_jsonl_file, parse + +''' +This is script is used to process the whole file of MKQA +Every two sentences (QA objects) in a line share the same language +''' + +def preprocess(args): + path = os.path.join(args.input_dir, "mkqa.jsonl") + data = read_jsonl_file(path) + ''' + add add train/eval/test instruction and language chosen + ''' + locales = list(data[0]["answers"].keys()) + pack = [] + + for line in data: + for locale in locales: + t = { + "turn": "multi", + "locale": locale, + "dialog": [] + 
} + + que = { + "role": "question", + "utterance": line["queries"][locale] + } + ans = { + "role": "answer", + "utterance": line["answers"][locale][0]["text"] + } + + t["dialog"].append(que) + t["dialog"].append(ans) + + pack.append(t) + + write_jsonl_file(pack, os.path.join(args.output_dir, "mkqa_preprocessed.jsonl")) + + +if __name__ == "__main__": + args = parse() + preprocess(args) \ No newline at end of file diff --git a/src/preprocess/MKQA_seperated .py b/src/preprocess/MKQA_seperated .py new file mode 100644 index 0000000000000000000000000000000000000000..c89895c2779fa309066ec56ad16e5d403be26a32 --- /dev/null +++ b/src/preprocess/MKQA_seperated .py @@ -0,0 +1,66 @@ +import os +from utils import read_jsonl_file, write_jsonl_file, parse + +''' +This script can divide the whole dataset into 26 parts via language +''' + +def preprocess(args): + path = os.path.join(args.input_dir, "mkqa.jsonl") + data = read_jsonl_file(path) + ''' + add add train/eval/test instruction and language chosen + ''' + locales = list(data[0]["answers"].keys()) + + for locale in locales: + turns = [] + + for QA in data: + t = {"turn": "single", + "dialog": [], + "knowledge": None, + "goal": None, + "QA": None} + + que = {"role": "ROLE1", + "utterance": QA["queries"][locale], + "utter_trans": QA["query"], # new feature + "slot_value_table": [], + "summary": None, + "locale": locale, + "scenario": None, + "intent": None, + "topic": None, + "answer": None} + + # alternate answers + aliases = [] if "aliases" not in QA["answers"][locale][0] else QA["answers"][locale][0]["aliases"] + + ans_svt = {"slot": QA["answers"][locale][0]["type"], + "value": QA["answers"][locale][0]["text"], + "act": None, + "aliases": aliases} + + ans = {"role": "ROLE2", + "utterance": ans_svt["value"], + "utter_trans": QA["answers"]["en"][0]["text"], # new feature + "slot_value_table": ans_svt, + "summary": None, + "locale": locale, + "scenario": None, + "intent": None, + "topic": None, + "answer": ans_svt["value"]} + + t["dialog"].append(que) + t["dialog"].append(ans) + + turns.append(t) + + write_jsonl_file(turns, os.path.join(args.output_dir, locale + ".jsonl")) + + +if __name__ == "__main__": + args = parse() + preprocess(args) \ No newline at end of file diff --git a/src/preprocess/MTOP.py b/src/preprocess/MTOP.py new file mode 100644 index 0000000000000000000000000000000000000000..85b03c98ddf13baf179c086296e0ffcb3019cf24 --- /dev/null +++ b/src/preprocess/MTOP.py @@ -0,0 +1,72 @@ +import os +import re +import sys +from utils import read_txt_file, write_jsonl_file, parse + + +def readfile(path): + data = read_txt_file(path) + return data + + +def get_slot_value_table(decouple): + svt = [] + pattern = re.compile(r'[[](.*?)[]]', re.S) + slot_list = re.findall(pattern, decouple) + dic = {} + for item in slot_list: + slot_value = item.split(":")[-1] + pair = slot_value.strip().split(' ', 1) + dic["slot"] = pair[0] + dic["value"] = pair[-1] + svt.append(dic) + dic = {} + return svt + + +def preprocess(args): + filenames = os.listdir(args.input_dir) + for filename in filenames: + if len(filename) > 2: + continue + + # Aiming at different path + # path = os.path.join(args.input_dir, filename) + # if not os.path.isdir(path): + # continue + # path = os.path.join(path, args.dataset + ".txt") + + path = os.path.join(args.input_dir, filename, args.dataset + ".txt") + data = readfile(path) + turns = [] + + + for line in data: + t = {} + t["turn"] = "single" + t["dialog"] = [] + + d = {} + elem = line.split("\t") + d["role"] = "ROLE" + d["utterance"] 
= elem[3] + d["slot_value_table"] = get_slot_value_table(elem[6]) + d["summary"] = None + d["locale"] = elem[5][:2] + d["scenario"] = elem[4] + d["intent"] = elem[1].split(":")[1] + d["answer"] = None + t["dialog"].append(d) + + t["knowledge"] = None + t["goal"] = None + turns.append(t) + write_jsonl_file(turns, os.path.join(args.output_dir, filename + '_' + args.dataset + ".jsonl")) + + +if __name__ == "__main__": + args = parse() + if args.dataset not in ["train", "test", "eval"]: + print("Wrong dataset type") + sys.exit(1) + preprocess(args) diff --git a/src/preprocess/Molweni.py b/src/preprocess/Molweni.py new file mode 100644 index 0000000000000000000000000000000000000000..ed09cdbb7764245f5ab9f33c8fa92996460cd939 --- /dev/null +++ b/src/preprocess/Molweni.py @@ -0,0 +1,49 @@ +import os +from utils import parse, read_json_file, write_jsonl_file + + +def parse_dialogue_knowledge(utterances): + dialogue = [] + for utter in utterances: + dialogue.append({"roles": [utter["speaker"]], "utterance": utter["text"]}) + + return {"type": "dict", "value": {"dialogue": dialogue}} + + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"{split}.json") + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + data = read_json_file(infile)["data"]["dialogues"] + processed_data = [] + + for example in data: + knowledge = parse_dialogue_knowledge(example["edus"]) + + for qa in example["qas"]: + dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": knowledge, + } + + dial["dialog"].append({"roles": ["USER"], "utterance": qa["question"]}) + + if not qa["answers"]: + dial["dialog"].append({"roles": ["SYSTEM"], "utterance": "None"}) + else: + for answer in qa["answers"]: + dial["dialog"].append( + {"roles": ["SYSTEM"], "utterance": answer["text"]} + ) + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + preprocess(args, "test") diff --git a/src/preprocess/MuTual.py b/src/preprocess/MuTual.py new file mode 100644 index 0000000000000000000000000000000000000000..edc6496bdd169ed4d78f1fed80e23b37fb7b5023 --- /dev/null +++ b/src/preprocess/MuTual.py @@ -0,0 +1,132 @@ +import os +from utils import parse, read_json_file, write_jsonl_file, choices +import re +from tqdm import tqdm +import random + +options = list(range(4)) + + +def refine_roles_of_dialog(example): + final = example["options"][0][0] + + assert final.lower() in ["m", "f"] + + lowercase = True + if final in ["M", "F"]: + lowercase = False + + if lowercase: + turns = example["article"].split("m ; f :") + roles = ["m :", "f :"] + else: + turns = example["article"].split("M;F:") + roles = ["M:", "F:"] + + role_idx = 0 if final.lower() == "m" else 1 + role_idx = (role_idx + (len(turns) % 2) + 1) % 2 + + new_turns = [] + + for turn in turns: + if not turn: + continue + new_turns.append(roles[role_idx]) + new_turns.append(turn) + role_idx = 1 - role_idx + + return new_turns + + +def get_an_order_of_choices(label, sep): + wrong_choices = options[:label] + options[label + 1 :] + random.shuffle(wrong_choices) + + choices_order = [label] + wrong_choices + + return sep.join([choices[idx] for idx in choices_order]) + + +def preprocess(args, split, part): + indir = os.path.join(os.path.join(args.input_dir, part), split) + outfile = os.path.join(os.path.join(args.output_dir, part), f"{split}.jsonl") + + processed_data = [] + for filename in tqdm(os.listdir(indir)): + filepath = 
os.path.join(indir, filename) + example = read_json_file(filepath) + + dial = {"turn": "multi", "locale": "en", "dialog": []} + + if "plus" not in part: + turns = re.split("([mf] :)", example["article"]) + else: + turns = re.split("([MF]:)", example["article"]) + if not turns[0]: + turns = turns[1:] + assert len(turns) % 2 == 0, example + else: + turns = refine_roles_of_dialog(example) + # print(turns) + # print(example) + for i in range(0, len(turns), 2): + role = turns[i] + utterance = turns[i + 1] + + if "plus" not in part: + assert ( + len(role) == 3 + and role[0] in ["m", "f"] + and role[1] == " " + and role[2] == ":" + ) + else: + assert len(role) == 2 and role[0] in ["M", "F"] and role[1] == ":" + + if role[0].lower() == "m": + role = "male" + else: + role = "female" + + dial["dialog"].append({"roles": [role], "utterance": utterance.strip()}) + + dial["knowledge"] = {"type": "dict", "value": {}} + + for idx, option in enumerate(example["options"]): + role, utterance = option.split(":", 1) + role = role.strip().lower() + + assert role in ["m", "f"] + + if role == "m": + role = "male" + else: + role = "female" + + # utterance = f"{role}: {utterance.strip()}" + + # dial["dialog"].append( + # {"roles": [f"{choices[idx]} choice"], "utterance": utterance} + # ) + dial["knowledge"]["value"][chr(ord("A") + idx)] = utterance.strip() + + # label = ord(example["answers"]) - ord("A") + # assert 0 <= label < 4, example["answers"] + + # This task requires to predicts the order of choices. + # NOTE: we put the correct answer at the beginning and other options are shuffled. + # dial["dialog"][-1]["roles_to_select"] = [get_an_order_of_choices(label, ", ")] + dial["dialog"][-1]["roles_to_select"] = [example["answers"]] + + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + random.seed(args.seed) + preprocess(args, "train", "mutual") + preprocess(args, "dev", "mutual") + preprocess(args, "train", "mutual_plus") + preprocess(args, "dev", "mutual_plus") diff --git a/src/preprocess/Multi2WOZ.py b/src/preprocess/Multi2WOZ.py new file mode 100644 index 0000000000000000000000000000000000000000..5f1fd84c84ac529f560c11191621cc23bd962f11 --- /dev/null +++ b/src/preprocess/Multi2WOZ.py @@ -0,0 +1,83 @@ +import os +from utils import read_json_file, write_jsonl_file, parse + + +def get_belief_state(metadata): + bs = [] + + if not len(metadata): + return bs + + for d_name, domain in metadata.items(): + blf_stt = {"domain": d_name, + "goal": []} + + for i_name, intent in domain.items(): + goal = {"intent": i_name, + "slot_value_table": []} + + for slot, value in intent.items(): + if isinstance(value, (list, dict)) and len(value) > 0: + for item in value: + for inner_slot, inner_value in item.items(): + real_slot = slot + '_' + inner_slot + svt = {"slot": real_slot, + "value": inner_value} + goal["slot_value_table"].append(svt) + + else: + svt = {"slot": slot, + "value": value} + goal["slot_value_table"].append(svt) + + blf_stt["goal"].append(goal) + + bs.append(blf_stt) + + return bs + + +def preprocess(args): + dirnames = [os.path.join(args.input_dir, dir) for dir in os.listdir(args.input_dir)] + for dir in dirnames: + ''' + add train/eval/test instruction + ''' + filenames = os.listdir(dir) + locale = dir[-2:] + + for filename in filenames: + path = os.path.join(dir, filename) + data = read_json_file(path) + + turns = [] + for _, item in data.items(): + t = {"turn": "multi", + "dialog": [], + "knowledge": None} + + tag = "log-cn" if locale 
== "zh" else ("log-" + locale) + + for obj in item[tag]: + bs = get_belief_state(obj["metadata"]) + role = "USER" if not len(bs) else "SYS" + d = {"role": role, + "utterance": obj["text"], + "belief_state": bs, + "querying_result": None, + "summary": None, + "locale": locale, + "topic": None, + "opinion": None, + "answer": None} + + t["dialog"].append(d) + + turns.append(t) + + write_jsonl_file(turns, os.path.join(args.output_dir, filename[:-5]) + ".jsonl") + + +if __name__ == "__main__": + args = parse() + preprocess(args) \ No newline at end of file diff --git a/src/preprocess/MultiDoGo.py b/src/preprocess/MultiDoGo.py new file mode 100644 index 0000000000000000000000000000000000000000..0d18f1d013bdf3a5a64bb9fc3cf4a4c6a0f637a6 --- /dev/null +++ b/src/preprocess/MultiDoGo.py @@ -0,0 +1,93 @@ +import os +from utils import read_tsv_file, parse, write_jsonl_file + + +def parse_slot_value_table(utterance, slot_labels): + tokens = utterance.split(" ") + slot_labels = slot_labels.split() + start = -1 + record_slot = None + slots = [] + for idx, slot in enumerate(slot_labels): + if slot != "O": + # start + if start < 0: + start = idx + record_slot = slot + # end + if idx == (len(slot_labels) - 1) or slot_labels[idx + 1] == "O": + slots.append((record_slot, start, idx + 1)) + start = -1 + + svt = [] + prefix_lengths = [0] + for token in tokens: + prefix_lengths.append(prefix_lengths[-1] + len(token) + 1) + + for slot in slots: + start = prefix_lengths[slot[1]] + end = prefix_lengths[slot[2]] - 1 + assert utterance[start:end] == " ".join(tokens[slot[1] : slot[2]]) + + svt.append( + { + "slot": slot[0], + "values": [{"value": utterance[start:end], "start": start, "end": end}], + "relation": "=", + } + ) + + return svt + + +def preprocess(args, split): + input_dir = args.input_dir + + for domain in os.listdir(input_dir): + domain_dir = os.path.join(input_dir, domain) + outdir = os.path.join(args.output_dir, domain) + infile = os.path.join(domain_dir, f"{split}.tsv") + outfile = os.path.join(outdir, f"{split}.jsonl") + + data = read_tsv_file(infile) + processed_data = [] + + for i in range(len(data)): + row = data.iloc[i] + utterance = str(row["utterance"]) + intent = str(row["intent"]) + slot_labels = str(row["slot-labels"]) + + assert len(utterance.split(" ")) == len( + slot_labels.split(" ") + ), f"{utterance}\t{slot_labels}" + utterance = utterance.replace("\n", "\\n") + + processed_data.append( + { + "turn": "single", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": utterance, + "active_intents": [intent], + "slots_to_fill": { + "intent": intent, + "slot_value_table": parse_slot_value_table( + utterance, slot_labels + ), + }, + } + ], + } + ) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + preprocess(args, "test") diff --git a/src/preprocess/MultiWOZ_2.1.py b/src/preprocess/MultiWOZ_2.1.py new file mode 100644 index 0000000000000000000000000000000000000000..fb07d6e0cb33995532ac693ee72dd97035fb50d7 --- /dev/null +++ b/src/preprocess/MultiWOZ_2.1.py @@ -0,0 +1,584 @@ +# Refer https://github.com/thu-spmi/damd-multiwoz/blob/master/preprocess.py + +from utils import parse, read_json_file, read_line_labels, write_json_file +import os, re, json, spacy +from collections import OrderedDict +from tqdm import tqdm +import shutil + +all_domains = [ + "restaurant", + "hotel", + "attraction", + "train", + "taxi", + "police", + "hospital", +] + +mapping_pairs = { + "it 's": "it is", + 
"don 't": "do not", + "doesn 't": "does not", + "didn 't": "did not", + "you 'd": "you would", + "you 're": "you are", + "you 'll": "you will", + "i 'm": "i am", + "they 're": "they are", + "that 's": "that is", + "what 's": "what is", + "couldn 't": "could not", + "i 've": "i have", + "we 've": "we have", + "can 't": "cannot", + "i 'd": "i would", + "aren 't": "are not", + "isn 't": "is not", + "wasn 't": "was not", + "weren 't": "were not", + "won 't": "will not", + "there 's": "there is", + "there 're": "there are", + ". .": ".", + "restaurants": "restaurant -s", + "hotels": "hotel -s", + "laptops": "laptop -s", + "cheaper": "cheap -er", + "dinners": "dinner -s", + "lunches": "lunch -s", + "breakfasts": "breakfast -s", + "expensively": "expensive -ly", + "moderately": "moderate -ly", + "cheaply": "cheap -ly", + "prices": "price -s", + "places": "place -s", + "venues": "venue -s", + "ranges": "range -s", + "meals": "meal -s", + "locations": "location -s", + "areas": "area -s", + "policies": "policy -s", + "children": "child -s", + "kids": "kid -s", + "kidfriendly": "kid friendly", + "cards": "card -s", + "upmarket": "expensive", + "inpricey": "cheap", + "inches": "inch -s", + "uses": "use -s", + "dimensions": "dimension -s", + "driverange": "drive range", + "includes": "include -s", + "computers": "computer -s", + "machines": "machine -s", + "families": "family -s", + "ratings": "rating -s", + "constraints": "constraint -s", + "pricerange": "price range", + "batteryrating": "battery rating", + "requirements": "requirement -s", + "drives": "drive -s", + "specifications": "specification -s", + "weightrange": "weight range", + "harddrive": "hard drive", + "batterylife": "battery life", + "businesses": "business -s", + "hours": "hour -s", + "one": "1", + "two": "2", + "three": "3", + "four": "4", + "five": "5", + "six": "6", + "seven": "7", + "eight": "8", + "nine": "9", + "ten": "10", + "eleven": "11", + "twelve": "12", + "anywhere": "any where", + "good bye": "goodbye", +} + +normlize_slot_names = { + "car type": "car", + "entrance fee": "price", + "duration": "time", + "leaveat": "leave", + "arriveby": "arrive", + "trainid": "id", +} + + +def clean_time(utter): + utter = re.sub( + r"(\d+) ([ap]\.?m)", lambda x: x.group(1) + x.group(2), utter + ) # 9 am -> 9am + utter = re.sub(r"((? 'abc . xyz' + text = re.sub(r"(\w+)\.\.? ", r"\1 . ", text) # if 'abc. ' -> 'abc . 
' + + for fromx in mapping_pairs: + tox = mapping_pairs[fromx] + text = " " + text + " " + text = text.replace(" " + fromx + " ", " " + tox + " ")[1:-1] + + return text + + +def clean_slot_values(domain, slot, value): + value = clean_text(value) + if not value: + value = "" + elif value == "not mentioned": + value = "" + elif domain == "attraction": + if slot == "name": + if value == "t": + value = "" + if value == "trinity": + value = "trinity college" + elif slot == "area": + if value in ["town centre", "cent", "center", "ce"]: + value = "centre" + elif value in ["ely", "in town", "museum", "norwich", "same area as hotel"]: + value = "" + elif value in ["we"]: + value = "west" + elif slot == "type": + if value in ["m", "mus", "musuem"]: + value = "museum" + elif value in ["art", "architectural"]: + value = "architecture" + elif value in ["churches"]: + value = "church" + elif value in ["coll"]: + value = "college" + elif value in ["concert", "concerthall"]: + value = "concert hall" + elif value in ["night club"]: + value = "nightclub" + elif value in ["mutiple sports", "mutliple sports", "sports", "galleria"]: + value = "multiple sports" + elif value in ["ol", "science", "gastropub", "la raza"]: + value = "" + elif value in ["swimmingpool", "pool"]: + value = "swimming pool" + elif value in ["fun"]: + value = "entertainment" + + elif domain == "hotel": + if slot == "area": + if value in ["cen", "centre of town", "near city center", "center"]: + value = "centre" + elif value in ["east area", "east side"]: + value = "east" + elif value in ["in the north", "north part of town"]: + value = "north" + elif value in ["we"]: + value = "west" + elif slot == "day": + if value == "monda": + value = "monday" + elif value == "t": + value = "tuesday" + elif slot == "name": + if value == "uni": + value = "university arms hotel" + elif value == "university arms": + value = "university arms hotel" + elif value == "acron": + value = "acorn guest house" + elif value == "ashley": + value = "ashley hotel" + elif value == "arbury lodge guesthouse": + value = "arbury lodge guest house" + elif value == "la": + value = "la margherit" + elif value == "no": + value = "" + elif slot == "internet": + if value == "does not": + value = "no" + elif value in ["y", "free", "free internet"]: + value = "yes" + elif value in ["4"]: + value = "" + elif slot == "parking": + if value == "n": + value = "no" + elif value in ["free parking"]: + value = "yes" + elif value in ["y"]: + value = "yes" + elif slot in ["pricerange", "price range"]: + slot = "pricerange" + if value == "moderately": + value = "moderate" + elif value in ["any"]: + value = "do n't care" + elif value in ["any"]: + value = "do n't care" + elif value in ["inexpensive"]: + value = "cheap" + elif value in ["2", "4"]: + value = "" + elif slot == "stars": + if value == "two": + value = "2" + elif value == "three": + value = "3" + elif value in ["4-star", "4 stars", "4 star", "four star", "four stars"]: + value = "4" + elif slot == "type": + if value == "0 star rarting": + value = "" + elif value == "guesthouse": + value = "guest house" + elif value not in ["hotel", "guest house", "do n't care"]: + value = "" + elif domain == "restaurant": + if slot == "area": + if value in [ + "center", + "scentre", + "center of town", + "city center", + "cb30aq", + "town center", + "centre of cambridge", + "city centre", + ]: + value = "centre" + elif value == "west part of town": + value = "west" + elif value == "n": + value = "north" + elif value in ["the south"]: + value = 
"south" + elif value not in [ + "centre", + "south", + "do n't care", + "west", + "east", + "north", + ]: + value = "" + elif slot == "day": + if value == "monda": + value = "monday" + elif value == "t": + value = "tuesday" + elif slot in ["pricerange", "price range"]: + slot = "pricerange" + if value in ["moderately", "mode", "mo"]: + value = "moderate" + elif value in ["not"]: + value = "" + elif value in ["inexpensive", "ch"]: + value = "cheap" + elif slot == "food": + if value == "barbecue": + value = "barbeque" + elif slot == "pricerange": + if value == "moderately": + value = "moderate" + elif slot == "time": + if value == "9:00": + value = "09:00" + elif value == "9:45": + value = "09:45" + elif value == "1330": + value = "13:30" + elif value == "1430": + value = "14:30" + elif value == "9:15": + value = "09:15" + elif value == "9:30": + value = "09:30" + elif value == "1830": + value = "18:30" + elif value == "9": + value = "09:00" + elif value == "2:00": + value = "14:00" + elif value == "1:00": + value = "13:00" + elif value == "3:00": + value = "15:00" + elif domain == "taxi": + if slot in ["arriveBy", "arrive by"]: + slot = "arriveby" + if value == "1530": + value = "15:30" + elif value == "15 minutes": + value = "" + elif slot in ["leaveAt", "leave at"]: + slot = "leaveat" + if value == "1:00": + value = "01:00" + elif value == "21:4": + value = "21:04" + elif value == "4:15": + value = "04:15" + elif value == "5:45": + value = "05:45" + elif value == "0700": + value = "07:00" + elif value == "4:45": + value = "04:45" + elif value == "8:30": + value = "08:30" + elif value == "9:30": + value = "09:30" + value = value.replace(".", ":") + + elif domain == "train": + if slot in ["arriveBy", "arrive by"]: + slot = "arriveby" + if value == "1": + value = "01:00" + elif value in ["does not care", "doesnt care", "doesn't care"]: + value = "do n't care" + elif value == "8:30": + value = "08:30" + elif value == "not 15:45": + value = "" + value = value.replace(".", ":") + elif slot == "day": + if value == "doesnt care" or value == "doesn't care": + value = "do n't care" + elif slot in ["leaveAt", "leave at"]: + slot = "leaveat" + if value == "2:30": + value = "02:30" + elif value == "7:54": + value = "07:54" + elif value == "after 5:45 pm": + value = "17:45" + elif value in ["early evening", "friday", "sunday", "tuesday", "afternoon"]: + value = "" + elif value == "12": + value = "12:00" + elif value == "1030": + value = "10:30" + elif value == "1700": + value = "17:00" + elif value in [ + "does not care", + "doesnt care", + "do nt care", + "doesn't care", + ]: + value = "do n't care" + + value = value.replace(".", ":") + if value in ["dont care", "don't care", "do nt care", "doesn't care"]: + value = "do n't care" + if normlize_slot_names.get(slot): + slot = normlize_slot_names[slot] + return slot, value + + +def parse_belief_state(raw, dial_domains, constraint_dict, model): + for domain in dial_domains: + if not constraint_dict.get(domain): + constraint_dict[domain] = OrderedDict() + info_sv = raw[domain]["semi"] + for s, v in info_sv.items(): + s, v = clean_slot_values(domain, s, v) + if len(v.split()) > 1: + v = " ".join([token.text for token in model(v)]).strip() + if v != "": + constraint_dict[domain][s] = v + book_sv = raw[domain]["book"] + for s, v in book_sv.items(): + if s == "booked": + continue + s, v = clean_slot_values(domain, s, v) + if len(v.split()) > 1: + v = " ".join([token.text for token in model(v)]).strip() + if v != "": + constraint_dict[domain][s] = v + + 
belief_state = [] + for domain in constraint_dict: + cur_domain_bs = { + "intent": "", # FIXME + "informed_slot_value_table": [], + "requested_slots": [], # FIXME + "domain": domain, + } + for slot in constraint_dict[domain]: + cur_domain_bs["informed_slot_value_table"].append( + { + "slot": slot, + "values": [ + { + "value": constraint_dict[domain][slot], + "cononical_value": constraint_dict[domain][slot], + } + ], + "relation": "=", + } + ) + + belief_state.append(cur_domain_bs) + + return constraint_dict, belief_state + + +def preprocess(args): + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + data = read_json_file(os.path.join(args.input_dir, "data.json")) + test_split = read_line_labels(os.path.join(args.input_dir, "testListFile.txt")) + dev_split = read_line_labels(os.path.join(args.input_dir, "valListFile.txt")) + + model = spacy.load("en_core_web_sm") + + with open(os.path.join(args.output_dir, "train.jsonl"), "w") as train_writer, open( + os.path.join(args.output_dir, "dev.jsonl"), "w" + ) as dev_writer, open( + os.path.join(args.output_dir, "test.jsonl"), "w" + ) as test_writer: + for dial_id in tqdm(data): + origin_dialog = data[dial_id] + + constraint_dict = OrderedDict() + + # preprocess current dialogue + dialog = { + "turn": "multi", + "domain": [], # FIXME + "dialog": [], + } + + # parse all domains + dial_domains = [] + for dom, g in origin_dialog["goal"].items(): + if dom != "topic" and dom != "message" and g: + if dom in all_domains: + dial_domains.append(dom) + + for _, dial_turn in enumerate(origin_dialog["log"]): + dial_state = dial_turn["metadata"] + utterance = " ".join(clean_text(dial_turn["text"]).split()) + if not dial_state: # user + new_turn = { + "roles": ["USER"], + "utterance": utterance, + "dialog_acts": [], # FIXME + } + + else: # system + constraint_dict, new_bf_state = parse_belief_state( + dial_state, dial_domains, constraint_dict, model + ) + dialog["dialog"][-1]["belief_state"] = new_bf_state + new_turn = { + "roles": ["SYSTEM"], + "utterance": utterance, + "dialog_acts": [], # FIXME + } + + dialog["dialog"].append(new_turn) + + # write to file + if dial_id in test_split: + test_writer.write(json.dumps(dialog) + "\n") + elif dial_id in dev_split: + dev_writer.write(json.dumps(dialog) + "\n") + else: + train_writer.write(json.dumps(dialog) + "\n") + + +if __name__ == "__main__": + args = parse() + preprocess(args) + + schema = { + "taxi": ["leave", "destination", "departure", "arrive"], + "police": [], + "hospital": ["department"], + "hotel": [ + "type", + "parking", + "pricerange", + "internet", + "stay", + "day", + "people", + "area", + "stars", + "name", + ], + "attraction": ["area", "type", "name"], + "train": ["destination", "day", "arrive", "departure", "people", "leave"], + "restaurant": ["food", "pricerange", "area", "name", "time", "day", "people"], + } + + ontology = {domain: {slot: True for slot in schema[domain]} for domain in schema} + + write_json_file(ontology, os.path.join(args.output_dir, "train_ontology.json")) + shutil.copyfile( + os.path.join(args.output_dir, "train_ontology.json"), + os.path.join(args.output_dir, "dev_ontology.json"), + ) + shutil.copyfile( + os.path.join(args.output_dir, "train_ontology.json"), + os.path.join(args.output_dir, "test_ontology.json"), + ) diff --git a/src/preprocess/MultiWOZ_2.2.py b/src/preprocess/MultiWOZ_2.2.py new file mode 100644 index 0000000000000000000000000000000000000000..aafbc0558118223186e5cd9b1a3469e7b3101892 --- /dev/null +++ b/src/preprocess/MultiWOZ_2.2.py @@ 
-0,0 +1,246 @@ +from utils import parse, read_json_file, write_jsonl_file, write_json_file +import os + + +def parse_slots_index(slots): + slot2index = [] + for slot in slots: + # if slot["slot"] in slot2index and ( + # slot2index[slot["slot"]]["start"] != slot["start"] + # or slot2index[slot["slot"]]["end"] != slot["exclusive_end"] + # ): + # raise ValueError("test") + if "start" in slot: + slot2index.append((slot["slot"], slot["start"], slot["exclusive_end"])) + return slot2index + + +def get_slot_index(slot2index, start, slot): + for i in range(start, len(slot2index)): + if slot2index[i][0] == slot: + return i + 1, slot2index[i][1], slot2index[i][2] + + return start, -1, -1 + + +def is_empty_frame(frame): + def none_empty(state): + return state["requested_slots"] or state["slot_values"] + + return not (frame["actions"] or frame["slots"] or none_empty(frame["state"])) + + +def parse_dialogue_acts(frames, text): + domain_act_svt = dict() + + for frame in frames: + if is_empty_frame(frame): + continue + slot2index = parse_slots_index(frame["slots"]) + domain = frame["service"] + actions = frame["actions"] + + if domain not in domain_act_svt: + domain_act_svt[domain] = dict() + + cur_idx = 0 + for action in actions: + act = action["act"] + if act not in domain_act_svt[domain]: + domain_act_svt[domain][act] = set() + + # svp = { + # "slot": action["slot"], + # "value": action["values"], + # "relation": "equal_to", + # } + svp_tuple = ( + action["slot"], + tuple(action["values"]), + ) + + if "canonical_values" in action: + svp_tuple = svp_tuple + (tuple(action["canonical_values"]),) + + slot = action["slot"] + starts = [] + ends = [] + for _ in action["values"]: + cur_idx, start, end = get_slot_index(slot2index, cur_idx, slot) + starts.append(start) + ends.append(end) + + svp_tuple = svp_tuple + (tuple(starts),) + svp_tuple = svp_tuple + (tuple(ends),) + + domain_act_svt[domain][act].add(svp_tuple) + + def transform_svt(svt): + new_svt = [] + for item in sorted(svt): + svp = {"slot": item[0].split("-", 1)[-1], "values": []} + + for idx, value in enumerate(item[1]): + val_dict = { + "value": value, + } + if not len(item) % 2: + val_dict["cononical_value"] = item[2][idx] + + if len(item) > 3 and item[-2][idx] != -1: + val_dict["start"] = item[-2][idx] + val_dict["end"] = item[-1][idx] + + assert text[val_dict["start"] : val_dict["end"]] == value + svp["values"].append(val_dict) + + new_svt.append(svp) + return new_svt + + da = [] + for domain in domain_act_svt: + for act in domain_act_svt[domain]: + da.append( + { + "act": act, + "domain": domain, + "slot_value_table": transform_svt(domain_act_svt[domain][act]), + } + ) + + return da + + +def parse_belief_state(frames): + intent_domain_bs = dict() + + for frame in frames: + if is_empty_frame(frame): + continue + domain = frame["service"] + state = frame["state"] + intent = state["active_intent"] + + if intent not in intent_domain_bs: + intent_domain_bs[intent] = dict() + + if domain not in intent_domain_bs[intent]: + intent_domain_bs[intent][domain] = {"svt": set(), "requested_slots": set()} + + for slot in state["slot_values"]: + svp_tuple = (slot, tuple(state["slot_values"][slot])) + intent_domain_bs[intent][domain]["svt"].add(svp_tuple) + + intent_domain_bs[intent][domain]["requested_slots"] |= set( + state["requested_slots"] + ) + + def transform_svt(svt): + new_svt = [] + for item in sorted(svt): + new_svt.append( + { + "slot": item[0].split("-", 1)[-1], + "values": list(map(lambda x: {"value": x}, item[1])), + "relation": "=", + } + ) + 
return new_svt + + bs = [] + for intent in intent_domain_bs: + for domain in intent_domain_bs[intent]: + bs.append( + { + "intent": intent, + "domain": domain, + "requested_slots": sorted( + map( + lambda x: x.split("-", 1)[-1], + intent_domain_bs[intent][domain]["requested_slots"], + ) + ), + "informed_slot_value_table": transform_svt( + intent_domain_bs[intent][domain]["svt"] + ), + } + ) + + return bs + + +def parse_active_intents(frames): + active_intents = set() + for frame in frames: + active_intents.add(frame["state"]["active_intent"]) + + if "NONE" in active_intents: + active_intents.remove("NONE") + return sorted(active_intents) + + +def preprocess(args, split): + input_dir = os.path.join(args.input_dir, split) + processed_data = [] + for dialog_filename in os.listdir(input_dir): + if not dialog_filename.startswith("dialogues"): + continue + dialog_file_path = os.path.join(input_dir, dialog_filename) + data = read_json_file(dialog_file_path) + + for origin_dialog in data: + dialog = { + "turn": "multi", + "domain": origin_dialog["services"], + "dialog": [], + } + + turns = origin_dialog["turns"] + + for turn in turns: + role = turn["speaker"] + utterance = turn["utterance"] + frames = turn["frames"] + + # add dialogue acts + new_turn = { + "roles": [role], + "utterance": utterance, + "dialog_acts": parse_dialogue_acts(frames, utterance), + } + + if role == "USER": + new_turn["belief_state"] = parse_belief_state(frames) + new_turn["active_intents"] = parse_active_intents(frames) + + if "service_call" in turn: + new_turn["query"] = turn["service_call"] + if "service_results" in turn: + new_turn["querying_result"] = turn["querying_result"] + + dialog["dialog"].append(new_turn) + + processed_data.append(dialog) + + write_jsonl_file(processed_data, os.path.join(args.output_dir, f"{split}.jsonl")) + + # ontology + ontology = dict() + for domain_schema in read_json_file(os.path.join(args.input_dir, "schema.json")): + service_name = domain_schema["service_name"] + if service_name not in ontology: + ontology[service_name] = {} + + for slot in domain_schema["slots"]: + slot_name = slot["name"].split("-", 1)[-1] + if slot_name not in ontology[service_name]: + ontology[service_name][slot_name] = slot["is_categorical"] + + write_json_file(ontology, os.path.join(args.output_dir, f"{split}_ontology.json")) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + preprocess(args, "test") diff --git a/src/preprocess/Multilingual_TOP.py b/src/preprocess/Multilingual_TOP.py new file mode 100644 index 0000000000000000000000000000000000000000..dd98e84c130d2dbc8f7899f2679d189a77ec8392 --- /dev/null +++ b/src/preprocess/Multilingual_TOP.py @@ -0,0 +1,70 @@ +import os +from utils import read_txt_file, write_jsonl_file, parse + +def readfile(path): + data = read_txt_file(path) + return data + + +def get_slot_value_table(decouple): + svt = [] + slot_list = decouple[1].split(',') + dic = {} + for item in slot_list: + if item == '': + svt.append(dic) + continue + l = item.split(':') + + slot = l[-1].split('/') + if len(slot) == 1: + dic["slot"] = slot[0] + else: + dic["slot"] = slot[0] + '_' + slot[-1] + + dic["value"] = decouple[2][int(l[0]):int(l[1])] + svt.append(dic) + dic = {} + return svt + + +def preprocess(args): + dirnames = [dirname for dirname in os.listdir(args.input_dir) if len(dirname) < 3] + dirnames = [os.path.join(args.input_dir, dirname) for dirname in dirnames if os.path.isdir(os.path.join(args.input_dir, dirname))] + + for dirname in 
dirnames: + filenames = [filename for filename in os.listdir(dirname) if filename.rfind(".tsv") != -1] + for filename in filenames: # for all three files + ''' + add train/eval/test instruction + ''' + path = os.path.join(dirname, filename) + data = readfile(path) + turns = [] + for line in data: + t = {} + t["turn"] = "single" + t["dialog"] = [] + + d = {} + elem = line.split("\t") + d["role"] = "ROLE" + d["utterance"] = elem[2] + d["slot_value_table"] = get_slot_value_table(elem) + d["summary"] = None + d["locale"] = elem[3][:2] + d["scenario"] = elem[0].split('/')[0] + d["intent"] = elem[0].split('/')[1] + d["answer"] = None + t["dialog"].append(d) + + t["knowledge"] = None + t["goal"] = None + t["QA"] = {} + turns.append(t) + write_jsonl_file(turns, os.path.join(args.output_dir, filename.split('.')[0] + '.jsonl')) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/Multilingual_WOZ_2.0.py b/src/preprocess/Multilingual_WOZ_2.0.py new file mode 100644 index 0000000000000000000000000000000000000000..9a40c9830cbe9b3fdc7e84f0745dd450c0d6bbbf --- /dev/null +++ b/src/preprocess/Multilingual_WOZ_2.0.py @@ -0,0 +1,75 @@ +import os +import re +from utils import read_json_file, write_jsonl_file, parse + + +def get_slot_value_table(state): + svt = [] + dic = {} + for item in state: + dic["slot"] = item["slots"][0][0] + dic["value"] = item["slots"][0][1] + dic["act"] = item["act"] + svt.append(dic) + + return svt + + +def preprocess(args): + filenames = os.listdir(args.input_dir) + for filename in filenames: + ''' + add train/eval/test instruction + ''' + path = os.path.join(args.input_dir, filename) + data = read_json_file(path) + turns = [] + for item in data: + t = {"turn": "multi", + "dialog": [], + "dialog_label": [], # new feature + "knowledge": None, + "goal": None, + "QA": None} + + for dial in item["dialogue"]: + svt = get_slot_value_table(dial["belief_state"]) + locale = "en" + sr = re.search("_de|_it", filename) + if sr is not None: + locale = sr.group()[1:] + + d1 = {"role": "ROLE1", + "utterance": dial["transcript"], + "slot_value_table": svt, + "summary": None, + "locale": locale, + "scenario": None, + "intent": None, + "topic": None, + "answer": None} + + d2 = {"role": "ROLE2", + "utterance": dial["system_transcript"], + "slot_value_table": svt, + "summary": None, + "locale": "en", + "scenario": None, + "intent": None, + "topic": None, + "answer": None} + + label = dial["turn_label"] + + t["dialog"].append(d1) + t["dialog"].append(d2) + t["dialog_label"].append(label) + + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + filename[:-5] + '.jsonl') + + +if __name__ == "__main__": + args = parse() + preprocess(args) \ No newline at end of file diff --git a/src/preprocess/NarrativeQA.py b/src/preprocess/NarrativeQA.py new file mode 100644 index 0000000000000000000000000000000000000000..a323604b0a979e6a32b8857109bf2e4f1c0d8a91 --- /dev/null +++ b/src/preprocess/NarrativeQA.py @@ -0,0 +1,65 @@ +import os +from utils import parse, read_csv_file, write_jsonl_file + + +def load_summaries(infile): + data = read_csv_file(infile) + doc_ids = data["document_id"].values + splits = data["set"].values + summaries = data["summary"].values + + id2summary = dict() + + for doc_id, split, summary in zip(doc_ids, splits, summaries): + assert doc_id not in id2summary + id2summary[doc_id] = (split, summary) + + return id2summary + + +def preprocess(args): + summary_file = os.path.join(args.input_dir, "third_party/wikipedia/summaries.csv") + + 
processed_data = {"train": [], "dev": [], "test": []} + + id2summary = load_summaries(summary_file) + infile = os.path.join(args.input_dir, "qaps.csv") + data = read_csv_file(infile) + + for doc_id, split, question, answer1, answer2 in zip( + data["document_id"].values, + data["set"].values, + data["question"].values, + data["answer1"].values, + data["answer2"].values, + ): + assert split == id2summary[doc_id][0] + knowledge = { + "type": "dict", + "value": {"passage": id2summary[doc_id][1].replace("\n", "\\n")}, + } + + dial = {"turn": "single", "locale": "en", "dialog": [], "knowledge": knowledge} + + if split == "valid": + question = question.lower().capitalize() + + dial["dialog"].append({"roles": ["USER"], "utterance": question}) + + dial["dialog"].append({"roles": ["SYSTEM"], "utterance": answer1}) + + dial["dialog"].append({"roles": ["SYSTEM"], "utterance": answer2}) + + if split == "valid": + split = "dev" + + processed_data[split].append(dial) + + for split in processed_data: + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + write_jsonl_file(processed_data[split], outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/PCMD.py b/src/preprocess/PCMD.py new file mode 100644 index 0000000000000000000000000000000000000000..528d0f41c1085609495c3368077f28ed436f45c8 --- /dev/null +++ b/src/preprocess/PCMD.py @@ -0,0 +1,79 @@ +import os +from utils import read_json_file, write_jsonl_file, parse, choices +import re + + +def parse_knowledge(utterances): + answers = set() + dialogue = [] + + pattern = "@ent\d\d" + + for utter in utterances: + role = utter["speakers"] + if role: + answers.add(role) + else: + role = "@ent" + + dialogue.append({"roles": [role], "utterance": utter["tokens"]}) + + for entity in re.finditer(pattern, utter["tokens"]): + answers.add(entity.group()) + + return dialogue, sorted(answers) + + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"reading-comprehension-{split}.json") + + if split == "trn": + split = "train" + elif split == "tst": + split = "test" + + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + data = read_json_file(infile) + processed_data = [] + + for example in data: + knowledge_dial, answers = parse_knowledge(example["utterances"]) + + answer = example["answer"] + + assert answer in answers, f"{answer} {answers}" + + dial = {"turn": "single", "locale": "en", "dialog": []} + dial["knowledge"] = { + "type": "dict", + "value": {} + } + + dial["dialog"].append({"roles": ["USER"], "utterance": example["query"]}) + + label = -1 + for idx, choice in enumerate(answers): + # dial["dialog"].append( + # {"roles": [f"{choices[idx]} choice"], "utterance": choice} + # ) + dial["knowledge"]["value"][chr(ord('A') + idx)] = choice + + if answer == choice: + assert label == -1 + label = idx + + assert label >= 0 + dial["dialog"][-1]["roles_to_select"] = [chr(ord('A') + label)] + dial["knowledge"]["value"]["dialogue"] = knowledge_dial + + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "trn") + preprocess(args, "dev") + preprocess(args, "tst") diff --git a/src/preprocess/PERSONA-CHAT.py b/src/preprocess/PERSONA-CHAT.py new file mode 100644 index 0000000000000000000000000000000000000000..3def3fca42f6bcfa2bebf47ba892e15d82783e25 --- /dev/null +++ b/src/preprocess/PERSONA-CHAT.py @@ -0,0 +1,64 @@ +import os +from utils import parse, write_jsonl_file + + +def preprocess(args, split, 
part): + infile = os.path.join(args.input_dir, f"{split}_self_{part}_no_cands.txt") + if split == "valid": + split = "dev" + outfile = os.path.join(os.path.join(args.output_dir, part), f"{split}.jsonl") + + examples = [] + persona = [] + utterances = [] + + with open(infile, "r", encoding="UTF-8") as reader: + for line in reader: + # new dial + if line.startswith("1 your persona:"): + if utterances: + examples.append((persona, utterances)) + persona = [] + utterances = [] + + # remove leading number + line_num, line = line.split(" ", 1) + + assert line_num.isdigit() + + # collect persona + if line.startswith("your persona:"): + persona.append(line.split(":", 1)[-1].strip()) + else: + turn = line.split("\t") + assert len(turn) == 2 + for utterance in turn: + utterances.append(utterance.strip()) + + if utterances: + examples.append((persona, utterances)) + + processed_data = [] + for persona, utterances in examples: + dial = { + "turn": "multi", + "locale": "en", + "dialog": [], + "knowledge": {"type": "list", "value": persona}, + } + + roles = ["person 1", "person 2"] + for idx, utterance in enumerate(utterances): + dial["dialog"].append({"roles": [roles[idx % 2]], "utterance": utterance}) + + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train", "revised") + preprocess(args, "valid", "revised") + preprocess(args, "train", "original") + preprocess(args, "valid", "original") diff --git a/src/preprocess/QuAC.py b/src/preprocess/QuAC.py new file mode 100644 index 0000000000000000000000000000000000000000..d3cd307e80b38cb5e3675c1ccc263675e9fa65c4 --- /dev/null +++ b/src/preprocess/QuAC.py @@ -0,0 +1,56 @@ +from copy import deepcopy +from utils import read_json_file, write_jsonl_file, parse +import os + + +def preprocess(args, file): + path = os.path.join(args.input_dir, f"{file}_v0.2.json") + data = read_json_file(path) + data = data["data"] + + turns = [] + for article in data: + for para in article["paragraphs"]: + passage = para["context"] + if passage.endswith("CANNOTANSWER"): + passage = passage[:-12].strip() + t = { + "turn": "multi", + "locale": "en", + "dialog": [], + "knowledge": {"type": "dict", "value": {"passage": passage}}, + } + + for qa in para["qas"]: + dq = {"roles": ["USER"], "utterance": qa["question"]} + + t["dialog"].append(dq) + + assert file != "train" or len(qa["answers"]) == 1 + + for a in qa["answers"]: + text = a["text"] + if text == "CANNOTANSWER": + text = "None" + da = {"roles": ["SYSTEM"], "utterance": text} + + else: + da = { + "roles": ["SYSTEM"], + "utterance": text, + "start": a["answer_start"], + "end": a["answer_start"] + len(text), + } + t["dialog"].append(da) + + turns.append(t) + + if file == "val": + file = "dev" + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "val") diff --git a/src/preprocess/RACE.py b/src/preprocess/RACE.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b239d8c17833a5e366aa9336d617ff230cb6ad --- /dev/null +++ b/src/preprocess/RACE.py @@ -0,0 +1,52 @@ +import os +from utils import read_json_file, write_jsonl_file, parse, choices + + +def preprocess_split(args, split, highmid): + indir = os.path.join(os.path.join(args.input_dir, split), highmid) + outfile = os.path.join(os.path.join(args.output_dir, highmid), f"{split}.jsonl") + + processed_data = [] + for filename in os.listdir(indir): + infile = 
os.path.join(indir, filename) + data = read_json_file(infile) + + assert len(data["questions"]) == len(data["answers"]) == len(data["options"]) + + for question, options, answer in zip( + data["questions"], data["options"], data["answers"] + ): + dialog = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": {"type": "dict", "value": {}}, + } + + dialog["dialog"].append({"roles": ["USER"], "utterance": question}) + + for idx, option in enumerate(options): + # dialog["dialog"].append( + # {"roles": [f"{choices[idx]} choice"], "utterance": option} + # ) + dialog["knowledge"]["value"][chr(ord('A') + idx)] = option + + dialog["dialog"][-1]["roles_to_select"] = [ + answer + ] + + dialog["knowledge"]["value"]["passage"] = data["article"] + + processed_data.append(dialog) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess_split(args, "train", "high") + preprocess_split(args, "train", "middle") + preprocess_split(args, "dev", "high") + preprocess_split(args, "dev", "middle") + preprocess_split(args, "test", "high") + preprocess_split(args, "test", "middle") diff --git a/src/preprocess/README.md b/src/preprocess/README.md new file mode 100644 index 0000000000000000000000000000000000000000..71568d5fa1eb282b0544cfc1714a768e40a42acb --- /dev/null +++ b/src/preprocess/README.md @@ -0,0 +1,54 @@ +# Preprocess code for each dialogue dataset + +## Requirements +Preprocess each dataset into the predefined universal format specified [here](../../docs/diag-format.md). Each dataset has a **Python** script. The script is launched as follows: + +``` +python xxx.py --input <input_dir> --output <output_dir> +``` + +**NOTE: `<input_dir>` may include all train, dev and test splits. The script will preprocess all of them and write the output to `<output_dir>`.** + +The output directory should include all three splits as shown below: + +``` +<output_dir> +|-- train.jsonl +|-- dev.jsonl +|-- test.jsonl +``` + +**NOTE: Make sure to test your script thoroughly after finishing it! If you have some test code, you can place it in *src/test*.** + +A starter script is provided in **sample.py**. + +## Update log +* [author (e.g. md), date (e.g. 2022.07.15)]: add a **sample.py** +* [xrk, 2022.07.27] : add a **CamRest676.py** +* [hyh, 2022.08.02] : add an **Amazon-MASSIVE.py** +* [hyh, 2022.08.03] : add a **MTOP.py** +* [xrk, 2022.08.03] : update **CamRest676.py**, **Amazon-MASSIVE.py**, add **utils.py**, **CANARD.py**, **corpus.py**, **dialogsum.py**, **reading-comprehension.py**... +* [xrk, 2022.08.10] : update **utils.py**, add **sfsnips.py**, **ASTE.py**, **sentihood.py**, **ipbanking.py**, **ipwu64.py**, **clinc.py**, **mwoz22.py**, **alphanli.py**, **commonsenseqa.py**, **cosmosqa.py**, **csqa2.py**, **socialiqa.py** +* [hyh, 2022.08.10] : add **Multilingual_TOP.py**, **XPersona.py**, **DSTC6.py** +* [xrk, 2022.08.19] : add **goemotion.py**, **meld.py**, **emorynlp.py**, **reccon.py**, **ddrel.py**, **friendsqa.py**, **molweni.py**, **dialogre.py**, **dream.py** +* [hyh, 2022.08.19] : update **utils.py**, add **COD.py**, **MInDS-14.py**, **MKQA_integrated.py**, **MKQA_seperated.py**, **Multilingual_WOZ_2.0.py**, **Multi2WOZ.py** +* [xrk, 2022.08.24] : add **dialydialog.py**, **empatheticdialogues.py**, **commonsensedialog.py**, **rnnlg.py**, **e2e.py**, **google-sim-dialogue.py** +* [hyh, 2022.08.27] : add **BiToD.py**, **GlobalWoZ.py** + +## Refinement +* [md, 2022.09.16] : fix **alphanli**, using the format +``` +obs1: ... +obs2: ... +hyp: (1 or 2) labeled if it is a possible hyp. +```
+``` +* [md, 2022.09.17] : fix **Amazon-MASSIVE**, label the slot position in character level. +``` +sentence: ... +domain: ... +intent: ... +slot value tables: ... +``` + +* \ No newline at end of file diff --git a/src/preprocess/RNNLG.py b/src/preprocess/RNNLG.py new file mode 100644 index 0000000000000000000000000000000000000000..4d14fc5435445f8855161f9e6479c117a96b38cd --- /dev/null +++ b/src/preprocess/RNNLG.py @@ -0,0 +1,585 @@ +import os +from utils import parse, read_json_file, write_jsonl_file +import json +import random +import re +import itertools + +# Refer to https://github.com/shawnwun/RNNLG/blob/master/utils/nlp.py +replacements = [ + (" it's ", " it is "), + (" don't ", " do not "), + (" doesn't ", " does not "), + (" didn't ", " did not "), + (" you'd ", " you would "), + (" you're ", " you are "), + (" you'll ", " you will "), + (" i'm ", " i am "), + (" they're ", " they are "), + (" that's ", " that is "), + (" what's ", " what is "), + (" couldn't ", " could not "), + (" i've ", " i have "), + (" we've ", " we have "), + (" can't ", " cannot "), + (" i'd ", " i would "), + (" i'd ", " i would "), + (" aren't ", " are not "), + (" isn't ", " is not "), + (" wasn't ", " was not "), + (" weren't ", " were not "), + (" won't ", " will not "), + (" there's ", " there is "), + (" there're ", " there are "), + (" . . ", " . "), + (" restaurants ", " restaurant -s "), + (" hotels ", " hotel -s "), + (" laptops ", " laptop -s "), + (" cheaper ", " cheap -er "), + (" dinners ", " dinner -s "), + (" lunches ", " lunch -s "), + (" breakfasts ", " breakfast -s "), + (" expensively ", " expensive -ly "), + (" moderately ", " moderate -ly "), + (" cheaply ", " cheap -ly "), + (" prices ", " price -s "), + (" places ", " place -s "), + (" venues ", " venue -s "), + (" ranges ", " range -s "), + (" meals ", " meal -s "), + (" locations ", " location -s "), + (" areas ", " area -s "), + (" policies ", " policy -s "), + (" children ", " child -s "), + (" kids ", " kid -s "), + (" kidfriendly ", " kid friendly "), + (" cards ", " card -s "), + (" st ", " street "), + (" ave ", " avenue "), + (" upmarket ", " expensive "), + (" inpricey ", " cheap "), + (" inches ", " inch -s "), + (" uses ", " use -s "), + (" dimensions ", " dimension -s "), + (" driverange ", " drive range "), + (" includes ", " include -s "), + (" computers ", " computer -s "), + (" machines ", " machine -s "), + (" ecorating ", " eco rating "), + (" families ", " family -s "), + (" ratings ", " rating -s "), + (" constraints ", " constraint -s "), + (" pricerange ", " price range "), + (" batteryrating ", " battery rating "), + (" requirements ", " requirement -s "), + (" drives ", " drive -s "), + (" specifications ", " specification -s "), + (" weightrange ", " weight range "), + (" harddrive ", " hard drive "), + (" batterylife ", " battery life "), + (" businesses ", " business -s "), + (" hours ", " hour -s "), + (" accessories ", " accessory -s "), + (" ports ", " port -s "), + (" televisions ", " television -s "), + (" restrictions ", " restriction -s "), + (" extremely ", " extreme -ly "), + (" actually ", " actual -ly "), + (" typically ", " typical -ly "), + (" drivers ", " driver -s "), + (" teh ", " the "), + (" definitely ", " definite -ly "), + (" factors ", " factor -s "), + (" truly ", " true -ly "), + (" mostly ", " most -ly "), + (" nicely ", " nice -ly "), + (" surely ", " sure -ly "), + (" certainly ", " certain -ly "), + (" totally ", " total -ly "), + (" # ", " number "), + (" & ", " and "), +] + 
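+# Note (added for clarity): the replacement pairs above follow RNNLG's
+# utils/nlp.py normalisation, mapping contractions and plural/adverb surface
+# forms onto the corpus tokenisation (e.g. "restaurants" -> "restaurant -s").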
+ +def insertSpace(token, text): + sidx = 0 + while True: + sidx = text.find(token, sidx) + if sidx == -1: + break + if ( + sidx + 1 < len(text) + and re.match("[0-9]", text[sidx - 1]) + and re.match("[0-9]", text[sidx + 1]) + ): + sidx += 1 + continue + if text[sidx - 1] != " ": + text = text[:sidx] + " " + text[sidx:] + sidx += 1 + if sidx + len(token) < len(text) and text[sidx + len(token)] != " ": + text = text[: sidx + 1] + " " + text[sidx + 1 :] + sidx += 1 + return text + + +def normalize(text): + text = re.sub(" [\.\?\!]$", "", text) + # lower case every word + text = text.lower() + + # replace white spaces in front and end + text = re.sub(r"^\s*|\s*$", "", text) + + # normalize phone number + ms = re.findall("\(?(\d{3})\)?[-.\s]?(\d{3})[-.\s]?(\d{4})", text) + if ms: + sidx = 0 + for m in ms: + sidx = text.find(m[0], sidx) + if text[sidx - 1] == "(": + sidx -= 1 + eidx = text.find(m[-1], sidx) + len(m[-1]) + text = text.replace(text[sidx:eidx], "".join(m)) + + # replace st. + text = text.replace(";", ",") + text = re.sub("$\/", "", text) + text = text.replace("/", " and ") + + # replace other special characters + text = re.sub('[":\<>@]', "", text) + # text = re.sub('[\":\<>@\(\)]','',text) + text = text.replace(" - ", "") + + # insert white space before and after tokens: + for token in ["?", ".", ",", "!"]: + text = insertSpace(token, text) + + # replace it's, does't, you'd ... etc + text = re.sub("^'", "", text) + text = re.sub("'$", "", text) + text = re.sub("'\s", " ", text) + text = re.sub("\s'", " ", text) + for fromx, tox in replacements: + text = " " + text + " " + text = text.replace(fromx, tox)[1:-1] + + # insert white space for 's + text = insertSpace("'s", text) + + # remove multiple spaces + text = re.sub(" +", " ", text) + + # concatenate numbers + tmp = text + tokens = text.split() + i = 1 + while i < len(tokens): + if re.match("^\d+$", tokens[i]) and re.match("\d+$", tokens[i - 1]): + tokens[i - 1] += tokens[i] + del tokens[i] + else: + i += 1 + text = " ".join(tokens) + + return text + + +class DataLexicaliser(object): + def __init__(self): + self.special_values = ["none", "yes", "no", "dontcare", "?"] + self.special_slots = ["type"] + + def delexicalise(self, sent, jssv): + raise NotImplementedError("method delexicalise() hasn't been implemented") + + def lexicalise(self, sent, jssv): + raise NotImplementedError("method lexicalise() hasn't been implemented") + + +class ExactMatchDataLexicaliser(DataLexicaliser): + def __init__(self): + DataLexicaliser.__init__(self) + self.typetoken = "" + + def delexicalise(self, sent, jssv): + # no slot values return directly + if len(jssv) == 1 and jssv[0][1] == None: + return sent + for slot, value in sorted(jssv, key=lambda x: len(x[-1]), reverse=True): + if value in self.special_values: + continue # special values, skip + + # taking care of all possible permutations of multiple values + vs = value.replace(" or ", " and ").split(" and ") + permutations = [" and ".join(x) for x in itertools.permutations(vs)] + [ + " or ".join(x) for x in itertools.permutations(vs) + ] + + # try to match for each possible permutation + isMatched = False + for p in permutations: + if p in sent: # exact match , ends + sent = (" " + sent + " ").replace( + " " + p + " ", " SLOT_" + slot.upper() + " ", 1 + )[1:-1] + isMatched = True + break + if not isMatched: + pass + # raise ValueError('value "'+value+'" cannot be delexicalised!') + + return sent + + def lexicalise(self, sent, jssv): + # no slot values return directly + if len(jssv) == 1 and 
jssv[0][1] == None: + return sent + + # replace values + for slot, value in sorted(jssv, key=lambda x: len(x[0]), reverse=True): + if value in self.special_values: + continue # special values, skip + if "SLOT_" + slot.upper() not in sent: + pass + # raise ValueError('slot "SLOT_'+slot.upper()+'" does not exist !') + else: + sent = (" " + sent + " ").replace( + " SLOT_" + slot.upper() + " ", " " + value + " ", 1 + )[1:-1] + sent = (" " + sent + " ").replace(" SLOT_TYPE ", " " + self.typetoken + " ")[ + 1:-1 + ] + return sent + + +# Refer to https://github.com/shawnwun/RNNLG/blob/master/loader/FeatParser.py +class DialogActParser(object): + ## Assumption for the parser : + ## 1. dacts are separated by the "|" token + ## 2. slot value pairs are separated by the ";" token + ## 3. special values specified in "resource/special_values.tx" + ## unify the special values by dictionary keys + ## 4. it strips all "'" or """ token + ## 5. output json format + + def __init__(self): + self.special_values = { + "none": ["none"], + "yes": ["true", "yes"], + "no": ["false", "no"], + "dontcare": ["dontcare", "dont_care"], + } + + def parse(self, dact, keepValues=False): + acttype = dact.split("(")[0] + slt2vals = dact.split("(")[1].replace(")", "").split(";") + jsact = {"acttype": acttype, "s2v": []} + for slt2val in slt2vals: + if slt2val == "": # no slot + jsact["s2v"].append((None, None)) + elif "=" not in slt2val: # no value + slt2val = slt2val.replace("_", "").replace(" ", "") + jsact["s2v"].append((slt2val.strip("'\""), "?")) + else: # both slot and value exist + s, v = [x.strip("'\"") for x in slt2val.split("=")] + s = s.replace("_", "").replace(" ", "") + for key, vals in self.special_values.items(): + if v in vals: # unify the special values + v = key + if not v in self.special_values and not keepValues: # delexicalisation + v = "_" + jsact["s2v"].append((s, v)) + return jsact + + +class DActFormatter(object): + ## basic DAct formatter + ## 1. abstract class for Hard and Soft subclass + ## 2. define the basic parser command + def __init__(self): + self.parser = DialogActParser() + self.special_values = self.parser.special_values.keys() + + def format(self, dact, keepValues=False): + raise NotImplementedError("method format() hasn't been implemented") + + def parse(self, dact, keepValues=False): + return self.parser.parse(dact, keepValues) + + +class SoftDActFormatter(DActFormatter): + ## Soft DAct formatter + ## 1. subclass of DActFormatter + ## 2. main interface for parser/formatter + ## 3. 
formatting the JSON DAct produced by DialogActParser + ## into a feature format fed into the network + + def __init__(self): + DActFormatter.__init__(self) + + def format(self, dact): + jsact = super(SoftDActFormatter, self).parse(dact) + mem = {} + feature = [] + for sv in jsact["s2v"]: + s, v = sv + if s == None: # no slot no value + continue # skip it + elif v == "?": # question case + feature.append((s, v)) + elif v == "_": # categories + if s in mem: # multiple feature values + feature.append((s, v + str(mem[s]))) + mem[s] += 1 + else: # first occurance + feature.append((s, v + "1")) + mem[s] = 2 + elif v in self.special_values: # special values + feature.append((s, v)) + feature = [("a", jsact["acttype"])] + sorted(feature) + return feature + + def parse(self, dact, keepValues=False): + return self.parser.parse(dact, keepValues) + + +class DataReader(object): + def __init__( + self, + seed, + domain, + obj, + vocabfile, + trainfile, + validfile, + testfile, + percentage=1.0, + verbose=0, + lexCutoff=4, + ): + self.percentage = percentage # percentage of data used + # container for data + self.data = {"train": [], "valid": [], "test": []} + self.mode = "train" # mode for accessing data + self.index = 0 # index for accessing data + self.obj = obj + + ## init formatter/lexicaliser + self.formatter = SoftDActFormatter() + self.lexicaliser = ExactMatchDataLexicaliser() + # self.hardformatter = HardDActFormatter() + + ## for lexicalising SLOT_TYPE + self.lexicaliser.typetoken = domain + + # initialise dataset + self._setupData(trainfile, validfile, testfile) + # self._testDelexicalisation() + + # obtain pos tags + # self.obtainTags() + + if verbose: + self._printStats() + + def read(self): + ## default implementation for read() function + ## support batch reading & random shuffling + mode = "test" + if self.mode != mode: + self.mode = mode + index = 0 + + # end of data , reset index & return None + if self.index >= len(self.data[mode]): + data = None + self.index = 0 + # shuffle data except for testing + if mode != "test": + random.shuffle(self.data[mode]) + return data + + # reading a batch + start = self.index + end = ( + self.index + 1 + if self.index + 1 < len(self.data[mode]) + else len(self.data[mode]) + ) + data = self.data[mode][start:end] + self.index += 1 + + sents = [] + mrs = [] + for feat, dact, sent, base in data: + sents.append([self.lexicalise(sent[i], dact) for i in range(len(sent))]) + mrs.append(dact) + + assert len(mrs) == 1 and len(sents) == 1 + + return mrs[0], sents[0] + + def format(self, dact): + return self.formatter.format(dact) + + def _setupData(self, trainfile, validfile, testfile): + # load data from file + # train_group = True if self.obj == "dt" else False + # self.data["train"] = self._loadData(trainfile, train_group) + # self.data["valid"] = self._loadData(validfile, train_group) + self.data["test"] = self._loadData(testfile, False, True) + # cut train/valid data by proportion + # self.data["train"] = self.data["train"][ + # : int(self.percentage * len(self.data["train"])) + # ] + # self.data["valid"] = self.data["valid"][ + # : int(self.percentage * len(self.data["valid"])) + # ] + + def _loadData(self, filename, group=True, multiref=False): + with open(filename, "r") as fin: + data = json.load(fin) + + container = [] + for dact, sent, base in data: + # word tokens + sent = self.delexicalise(normalize(re.sub(" [\.\?\!]$", "", sent)), dact) + base = self.delexicalise(normalize(re.sub(" [\.\?\!]$", "", base)), dact) + feat = self.formatter.format(dact) + 
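+            # keep the soft-formatted DA features together with the raw act,
+            # the delexicalised sentence and the delexicalised baseline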
container.append([feat, dact, sent, base]) + + # grouping several sentences w/ the same dialogue act + # for testing set, or DT on train/valid + if group or multiref: + # grouping data points according to unique DAs + a2ref = {} + for feat, dact, sent, base in container: + if tuple(feat) in a2ref: + a2ref[tuple(feat)][0].append(dact) + a2ref[tuple(feat)][1].append(sent) + a2ref[tuple(feat)][2].append(base) + else: + a2ref[tuple(feat)] = [[dact], [sent], [base]] + # return grouped examples + if group: + reordered_container = [] + for feat, bundle in a2ref.items(): + reordered_container.append([feat, bundle[0], bundle[1], bundle[2]]) + return reordered_container + # return examples w/ multiple references + if multiref: + reordered_container = [] + for feat, dact, sent, base in container: + reordered_container.append( + [feat, dact, a2ref[tuple(feat)][1], a2ref[tuple(feat)][2]] + ) + return reordered_container + # if no grouping nor multiref, return directly + else: + return container + + def delexicalise(self, sent, dact): + feat = self.formatter.parse(dact, keepValues=True) + return self.lexicaliser.delexicalise(sent, feat["s2v"]) + + def lexicalise(self, sent, dact): + feat = self.formatter.parse(dact, keepValues=True) + return self.lexicaliser.lexicalise(sent, feat["s2v"]) + + +def parse_knowledge(mr): + intent, svt_str = mr.strip().split("(", 1) + intent = intent.strip(" ?") + assert svt_str[-1] == ")" + svt_str = svt_str[:-1].strip() + + knowledge = dict() + knowledge["intent"] = intent + + if svt_str: + for svp in svt_str.split(";"): + if "=" in svp: + slot, value = svp.strip().split("=") + value = value.strip("'\"") + else: + slot = svp.strip() + value = "None" + + slot = slot.strip() + knowledge[slot] = value + + return {"type": "dict", "value": knowledge} + + +def remove_header_comment(infile): + lines = [] + with open(infile, "r", encoding="UTF-8") as reader: + for line in reader: + if line.startswith("#"): + continue + lines.append(line) + + with open(infile, "w", encoding="UTF-8") as writer: + writer.write("".join(lines)) + + +def preprocess(args, split): + for domain in os.listdir(args.input_dir): + infile = os.path.join(os.path.join(args.input_dir, domain), f"{split}.json") + remove_header_comment(infile) + + if split == "valid": + outfile = os.path.join(os.path.join(args.output_dir, domain), "dev.jsonl") + else: + outfile = os.path.join( + os.path.join(args.output_dir, domain), f"{split}.jsonl" + ) + + processed_data = [] + + if split == "train": + data = read_json_file(infile) + + for example in data: + mr = example[0] + dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": parse_knowledge(mr), + } + dial["dialog"].append( + {"roles": ["SYSTEM"], "utterance": example[1].strip()} + ) + + processed_data.append(dial) + + else: + dr = DataReader(42, domain, "ml", None, None, None, infile) + + for i in range(len(dr.data["test"])): + mr, refs = dr.read() + dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": parse_knowledge(mr), + } + + for ref in refs: + dial["dialog"].append( + {"roles": ["SYSTEM"], "utterance": ref.strip()} + ) + + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "valid") + preprocess(args, "test") diff --git a/src/preprocess/Restaurant8k.py b/src/preprocess/Restaurant8k.py new file mode 100644 index 0000000000000000000000000000000000000000..0d5d5faefd3c0757f9c0864370cf731074e5b633 --- /dev/null 
+++ b/src/preprocess/Restaurant8k.py @@ -0,0 +1,72 @@ +from utils import ( + read_json_file, + write_jsonl_file, + parse, +) + +import os + + +# def collect_slots(args, file): +# path = args.input_dir + "/" + file + ".json" +# data = read_json_file(path) +# slots = set() +# for i in range(len(data)): +# if "labels" in data[i].keys(): +# for label in data[i]["labels"]: +# slots.add(label["slot"]) + +# return list(slots) + + +def parse_slot_value_table(labels, text): + svt = [] + + for item in labels: + start = 0 + end = len(text) + if "valueSpan" in item and "startIndex" in item["valueSpan"]: + start = item["valueSpan"]["startIndex"] + if "valueSpan" in item and "endIndex" in item["valueSpan"]: + end = item["valueSpan"]["endIndex"] + svt.append( + { + "slot": item["slot"], + "values": [{"value": text[start:end], "start": start, "end": end}], + "relation": "=", + } + ) + + return svt + + +def preprocess(args, split): + processed_data = [] + filename = f"{split}.json" if split != "train" else "train_0.json" + path = os.path.join(args.input_dir, filename) + data = read_json_file(path) + + for i in range(len(data)): + t = {"turn": "single", "locale": "en", "dialog": []} + d = { + "roles": ["USER"], + "utterance": data[i]["userInput"]["text"], + } + if "labels" in data[i]: + d["slots_to_fill"] = { + "slot_value_table": parse_slot_value_table( + data[i]["labels"], d["utterance"] + ) + } + + t["dialog"].append(d) + + processed_data.append(t) + + write_jsonl_file(processed_data, args.output_dir + "/" + split + ".jsonl") + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "test") diff --git a/src/preprocess/SAMSum.py b/src/preprocess/SAMSum.py new file mode 100644 index 0000000000000000000000000000000000000000..a1d905927c5dd9b92e0c7fdf4ffdd181d81d0447 --- /dev/null +++ b/src/preprocess/SAMSum.py @@ -0,0 +1,60 @@ +from utils import read_json_file, parse, write_jsonl_file +import os + +def parse_turn(turn): + assert ":" in turn, turn + idx = turn.index(':') + role = turn[:idx] + utterance = turn[idx + 1:].strip() + + return role, utterance + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"{split}.json") + + if split == "val": + split = "dev" + + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + with open(infile, "r", encoding="UTF-8") as reader: + data = read_json_file(infile) + processed_data = [] + + for example in data: + summary = example["summary"] + text_dialogue = example["dialogue"] + + if "\r\n" in text_dialogue: + sep = "\r\n" + else: + sep = "\n" + + dialogue = { + "turn": "multi", + "locale": "en", + "dialog": [], + "summary": summary + } + turns = text_dialogue.split(sep) + for turn in turns: + if not turn.strip(): + continue + roles, utterance = parse_turn(turn) + dialogue["dialog"].append({ + "roles": [roles], + "utterance": utterance + }) + + processed_data.append(dialogue) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == '__main__': + args = parse() + preprocess(args, "train") + preprocess(args, "val") + preprocess(args, "test") + + diff --git a/src/preprocess/SGD.py b/src/preprocess/SGD.py new file mode 100644 index 0000000000000000000000000000000000000000..9addf11b13ff773ad8f225aba7f24ea0af607239 --- /dev/null +++ b/src/preprocess/SGD.py @@ -0,0 +1,243 @@ +from utils import parse, read_json_file, write_jsonl_file, write_json_file +import os +import shutil + + +def parse_slots_index(slots): + slot2index = [] + for slot in slots: + # if slot["slot"] in slot2index and ( + # 
slot2index[slot["slot"]]["start"] != slot["start"] + # or slot2index[slot["slot"]]["end"] != slot["exclusive_end"] + # ): + # raise ValueError("test") + slot2index.append((slot["slot"], slot["start"], slot["exclusive_end"])) + return slot2index + + +def get_slot_index(slot2index, start, slot): + for i in range(start, len(slot2index)): + if slot2index[i][0] == slot: + return i + 1, slot2index[i][1], slot2index[i][2] + + return start, -1, -1 + + +def parse_dialogue_acts(frames, text): + domain_act_svt = dict() + + for frame in frames: + slot2index = parse_slots_index(frame["slots"]) + # remove the service number + domain = frame["service"].split("_", 1)[0] + actions = frame["actions"] + + if domain not in domain_act_svt: + domain_act_svt[domain] = dict() + + cur_idx = 0 + for action in actions: + act = action["act"] + if act not in domain_act_svt[domain]: + domain_act_svt[domain][act] = set() + + # svp = { + # "slot": action["slot"], + # "value": action["values"], + # "relation": "equal_to", + # } + svp_tuple = ( + action["slot"], + tuple(action["values"]), + ) + + if "canonical_values" in action: + svp_tuple = svp_tuple + (tuple(action["canonical_values"]),) + + slot = action["slot"] + starts = [] + ends = [] + for _ in action["values"]: + cur_idx, start, end = get_slot_index(slot2index, cur_idx, slot) + starts.append(start) + ends.append(end) + + svp_tuple = svp_tuple + (tuple(starts),) + svp_tuple = svp_tuple + (tuple(ends),) + + domain_act_svt[domain][act].add(svp_tuple) + + def transform_svt(svt): + new_svt = [] + for item in sorted(svt): + svp = {"slot": item[0], "values": []} + + for idx, value in enumerate(item[1]): + val_dict = { + "value": value, + } + if not len(item) % 2: + val_dict["cononical_value"] = item[2][idx] + + if len(item) > 3 and item[-2][idx] != -1: + val_dict["start"] = item[-2][idx] + val_dict["end"] = item[-1][idx] + + assert text[val_dict["start"] : val_dict["end"]] == value + svp["values"].append(val_dict) + + new_svt.append(svp) + return new_svt + + da = [] + for domain in domain_act_svt: + for act in domain_act_svt[domain]: + da.append( + { + "act": act, + "domain": domain, + "slot_value_table": transform_svt(domain_act_svt[domain][act]), + } + ) + + return da + + +def parse_belief_state(frames): + intent_domain_bs = dict() + + for frame in frames: + # remove the service number + domain = frame["service"].split("_", 1)[0] + # domain = frame["service"] + state = frame["state"] + intent = state["active_intent"] + + if intent not in intent_domain_bs: + intent_domain_bs[intent] = dict() + + if domain not in intent_domain_bs[intent]: + intent_domain_bs[intent][domain] = {"svt": set(), "requested_slots": set()} + + for slot in state["slot_values"]: + svp_tuple = (slot, tuple(state["slot_values"][slot])) + intent_domain_bs[intent][domain]["svt"].add(svp_tuple) + + intent_domain_bs[intent][domain]["requested_slots"] |= set( + state["requested_slots"] + ) + + def transform_svt(svt): + new_svt = [] + for item in sorted(svt): + new_svt.append( + { + "slot": item[0], + "values": list(map(lambda x: {"value": x}, item[1])), + "relation": "=", + } + ) + return new_svt + + bs = [] + for intent in intent_domain_bs: + for domain in intent_domain_bs[intent]: + bs.append( + { + "intent": intent, + "domain": domain, + "requested_slots": sorted( + intent_domain_bs[intent][domain]["requested_slots"] + ), + "informed_slot_value_table": transform_svt( + intent_domain_bs[intent][domain]["svt"] + ), + } + ) + + return bs + + +def parse_active_intents(frames): + active_intents = set() + 
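+    # every frame of the turn reports its own active intent; a set removes duplicates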
for frame in frames: + active_intents.add(frame["state"]["active_intent"]) + + return sorted(active_intents) + + +def preprocess(args, split): + input_dir = os.path.join(args.input_dir, split) + processed_data = [] + + # # ---------------- FIXME ---------------- # + # filenames = [ + # filename + # for filename in os.listdir(input_dir) + # if filename.startswith("dialogues") + # ] + # for i in range(len(filenames)): + # dialog_filename = f"dialogues_{(i + 1):03d}.json" + # print(dialog_filename) + for dialog_filename in os.listdir(input_dir): + if not dialog_filename.startswith("dialogues"): + continue + dialog_file_path = os.path.join(input_dir, dialog_filename) + data = read_json_file(dialog_file_path) + + for origin_dialog in data: + dialog = { + "turn": "multi", + "domain": origin_dialog["services"], + "dialog": [], + } + + turns = origin_dialog["turns"] + + for turn in turns: + role = turn["speaker"] + utterance = turn["utterance"] + frames = turn["frames"] + + # add dialogue acts + new_turn = { + "roles": [role], + "utterance": utterance, + "dialog_acts": parse_dialogue_acts(frames, utterance), + } + + if role == "USER": + new_turn["belief_state"] = parse_belief_state(frames) + new_turn["active_intents"] = parse_active_intents(frames) + + if "service_call" in turn: + new_turn["query"] = turn["service_call"] + if "service_results" in turn: + new_turn["querying_result"] = turn["querying_result"] + + dialog["dialog"].append(new_turn) + + processed_data.append(dialog) + + write_jsonl_file(processed_data, os.path.join(args.output_dir, f"{split}.jsonl")) + + # ontology + ontology = dict() + for domain_schema in read_json_file(os.path.join(input_dir, "schema.json")): + service_name = domain_schema["service_name"].split("_", 1)[0] + if service_name not in ontology: + ontology[service_name] = {} + + for slot in domain_schema["slots"]: + slot_name = slot["name"].split("-", 1)[-1] + if slot_name not in ontology[service_name]: + ontology[service_name][slot_name] = slot["is_categorical"] + + write_json_file(ontology, os.path.join(args.output_dir, f"{split}_ontology.json")) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + preprocess(args, "test") diff --git a/src/preprocess/SNIPS.py b/src/preprocess/SNIPS.py new file mode 100644 index 0000000000000000000000000000000000000000..e9493984ac73b45b692a98423611caf2f3e94305 --- /dev/null +++ b/src/preprocess/SNIPS.py @@ -0,0 +1,84 @@ +import os +from utils import read_json_file, parse, write_jsonl_file + + +def parse_example(example): + text = "" + svt = [] + + pre_len = 0 + for part in example: + text += part["text"] + if "entity" in part: + svt.append( + { + "slot": part["entity"], + "values": [ + { + "value": part["text"], + "start": pre_len, + "end": pre_len + len(part["text"]), + } + ], + "relation": "=", + } + ) + + pre_len += len(part["text"]) + + # check + for svp in svt: + start = svp["values"][0]["start"] + end = svp["values"][0]["end"] + assert text[start:end] == svp["values"][0]["value"] + return text, svt + + +def preprocess(args, split): + for domain in os.listdir(args.input_dir): + + if domain == "PlayMusic": + encoding = "ISO-8859-1" + else: + encoding = "UTF-8" + + if domain.startswith("README"): + continue + domain_dir = os.path.join(args.input_dir, domain) + + if split == "train": + infile = os.path.join(domain_dir, f"train_{domain}_full.json") + else: + infile = os.path.join(domain_dir, f"validate_{domain}.json") + + data = read_json_file(infile, encoding=encoding) + 
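+        # each domain file keys its examples by the domain name; an example's
+        # "data" field is a list of text chunks, some carrying an "entity" slot label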
processed_data = [] + + outfile = os.path.join(os.path.join(args.output_dir, domain), f"{split}.jsonl") + + for example in data[domain]: + text, svt = parse_example(example["data"]) + processed_data.append( + { + "turn": "single", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": text, + "slots_to_fill": { + "intent": domain, + "slot_value_table": svt, + }, + } + ], + } + ) + + write_jsonl_file(processed_data, outfile, encoding=encoding) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") diff --git a/src/preprocess/SParC.py b/src/preprocess/SParC.py new file mode 100644 index 0000000000000000000000000000000000000000..7019d306093a2d2ec29590e7b6274f6f40c761ae --- /dev/null +++ b/src/preprocess/SParC.py @@ -0,0 +1,83 @@ +from Spider import dump_db_json_schema +import json +import os +import copy +from utils import write_jsonl_file, parse +import shutil + + +def preprocess(args, split): + data_filepaths = [os.path.join(args.input_dir, f"{split}.json")] + db_path = os.path.join(args.input_dir, "database") + + out_db_path = os.path.join(args.output_dir, "database") + + if not os.path.exists(out_db_path): + shutil.copytree(db_path, out_db_path) + + schema_cache = dict() + processed_data = [] + for data_filepath in data_filepaths: + with open(data_filepath, encoding="utf-8") as f: + sparc = json.load(f) + + for sample in sparc: + dialog = { + "locale": "en", + "dialog": [], + } + db_id = sample["database_id"] + if db_id not in schema_cache: + schema_cache[db_id] = dump_db_json_schema( + db_path + "/" + db_id + "/" + db_id + ".sqlite", db_id + ) + schema = schema_cache[db_id] + + dialog["knowledge"] = { + "type": "dict", + "value": {"db_id": db_id, "schema": schema, "db_path": out_db_path}, + } + + final_dialog = copy.deepcopy(dialog) + final_dialog["turn"] = "single" + final_dialog["dialog"].append( + { + "roles": ["USER"], + "utterance": sample["final"]["utterance"] + .replace("``", '"') + .replace("''", '"') + .strip(), + "sql": sample["final"]["query"], + } + ) + + if split == "train": + processed_data.append(final_dialog) + dialog["turn"] = "multi" + + for _, turn in enumerate(sample["interaction"]): + dialog["dialog"].append( + { + "roles": ["USER"], + "utterance": turn["utterance"] + .replace("``", '"') + .replace("''", '"') + .strip(), + "sql": turn["query"], + } + ) + + processed_data.append(dialog) + + write_jsonl_file(processed_data, os.path.join(args.output_dir, f"{split}.jsonl")) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + + shutil.copyfile( + os.path.join(args.input_dir, "tables.json"), + os.path.join(args.output_dir, "tables.json"), + ) diff --git a/src/preprocess/SQuAD_2.0.py b/src/preprocess/SQuAD_2.0.py new file mode 100644 index 0000000000000000000000000000000000000000..94e9ab50bf2228d86445090e6ffcce96579fe85c --- /dev/null +++ b/src/preprocess/SQuAD_2.0.py @@ -0,0 +1,58 @@ +from utils import parse, read_json_file, write_jsonl_file +import os + + +def preprocess(args, split): + infile = os.path.join(args.input_dir, f"{split}-v2.0.json") + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + data = read_json_file(infile)["data"] + processed_data = [] + + for part in data: + title = part["title"] + for example in part["paragraphs"]: + paragraph = example["context"] + + knowledge = { + "type": "dict", + "value": {"title": title, "passage": paragraph}, + } + + for qa in example["qas"]: + question = qa["question"] + processed_data.append( + { 
+ "turn": "single", + "locale": "en", + "dialog": [{"roles": ["USER"], "utterance": question}], + "knowledge": knowledge, + } + ) + + assert split != "train" or len(qa["answers"]) <= 1 + if qa["answers"]: + for answer in qa["answers"]: + processed_data[-1]["dialog"].append( + { + "roles": ["SYSTEM"], + "utterance": answer["text"], + "start": answer["answer_start"], + "end": answer["answer_start"] + len(answer["text"]), + } + ) + else: + processed_data[-1]["dialog"].append( + { + "roles": ["SYSTEM"], + "utterance": "None", + } + ) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") diff --git a/src/preprocess/Soccer.py b/src/preprocess/Soccer.py new file mode 100644 index 0000000000000000000000000000000000000000..7727a7b8d08d57bf3396cf9c6993c6b4b13a5383 --- /dev/null +++ b/src/preprocess/Soccer.py @@ -0,0 +1,59 @@ +import os +from utils import parse, write_jsonl_file + + +def parse_kg_knowledge(kg): + return {"type": "dict", "value": {"knowledge graph": kg}} + + +def preprocess(args, split, part): + if part == "incar": + infile = os.path.join(args.input_dir, f"{split}_incar.txt") + else: + infile = os.path.join(args.input_dir, f"{split}.txt") + + if split == "val": + split = "dev" + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + + processed_data = [] + with open(infile, "r", encoding="UTF-8") as reader: + dialogues = reader.read().strip().split("\n\n") + + for dialogue in dialogues: + assert dialogue + + kg = [] + turns = [] + lines = dialogue.split("\n") + assert lines[0] == "#conv#" + + for line in lines[1:]: + if line.startswith("0"): + kg.append(line.split()[1:]) + else: + line = line.split(" ", 1)[1] + turns.append(line.split("\t")[:2]) + + dial = { + "turn": "multi", + "locale": "en", + "dialog": [], + "knowledge": parse_kg_knowledge(kg), + } + + for turn in turns: + dial["dialog"].append({"roles": ["USER"], "utterance": turn[0]}) + + dial["dialog"].append({"roles": ["SYSTEM"], "utterance": turn[1]}) + + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train", "KVR") + preprocess(args, "val", "KVR") + preprocess(args, "test", "KVR") diff --git a/src/preprocess/SocialIQA.py b/src/preprocess/SocialIQA.py new file mode 100644 index 0000000000000000000000000000000000000000..0c5fff7274eb476ecea618b30a6803d8b2ef6729 --- /dev/null +++ b/src/preprocess/SocialIQA.py @@ -0,0 +1,47 @@ +import os +from utils import read_jsonl_file, read_line_labels, write_jsonl_file, parse, choices + + +def preprocess(args, split): + indatafile = os.path.join(args.input_dir, f"{split}.jsonl") + inlabelfile = os.path.join(args.input_dir, f"{split}-labels.lst") + + data = read_jsonl_file(indatafile) + labels = list(map(int, read_line_labels(inlabelfile))) + + processed_data = [] + + for example, label in zip(data, labels): + dial = { + "turn": "single", + "locale": "en", + "dialog": [], + "knowledge": {"type": "dict", "value": {}}, + } + + dial["dialog"].append({"roles": ["USER"], "utterance": example["question"]}) + + example.pop("question") + + assert len(example) == 4 + + for i in range(3): + answer = example[f"answer{chr(ord('A')+i)}"] + # dial["dialog"].append( + # {"roles": [f"{choices[i]} choice"], "utterance": answer} + # ) + dial["knowledge"]["value"][f"answer{chr(ord('A')+i)}"] = answer + + assert 1 <= label <= 3 + dial["dialog"][-1]["roles_to_select"] = [f"answer{chr(ord('A')+label-1)}"] + 
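+        # (label is 1-indexed, so label 1 selected "answerA" above)
+        # the context paragraph is carried as knowledge rather than as a turn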
dial["knowledge"]["value"]["passage"] = example["context"] + + processed_data.append(dial) + + write_jsonl_file(processed_data, os.path.join(args.output_dir, f"{split}.jsonl")) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") diff --git a/src/preprocess/Spider.py b/src/preprocess/Spider.py new file mode 100644 index 0000000000000000000000000000000000000000..7033e0dd57f7c7f5e999863df4e42102a482e5ee --- /dev/null +++ b/src/preprocess/Spider.py @@ -0,0 +1,174 @@ +from typing import List +import sqlite3 +import sys +import traceback +from utils import parse, write_jsonl_file +import os +import json +import shutil + + +def convert_fk_index(data): + fk_holder = [] + for fk in data["foreign_keys"]: + tn, col, ref_tn, ref_col = fk[0][0], fk[0][1], fk[1][0], fk[1][1] + ref_cid, cid = None, None + try: + tid = data["table_names_original"].index(tn) + ref_tid = data["table_names_original"].index(ref_tn) + + for i, (tab_id, col_org) in enumerate(data["column_names_original"]): + if tab_id == ref_tid and ref_col == col_org: + ref_cid = i + elif tid == tab_id and col == col_org: + cid = i + if ref_cid and cid: + fk_holder.append([cid, ref_cid]) + except: + traceback.print_exc() + print("table_names_original: ", data["table_names_original"]) + print("finding tab name: ", tn, ref_tn) + sys.exit() + return fk_holder + + +def dump_db_json_schema(db, f): + """read table and column info""" + conn = sqlite3.connect(db) + conn.execute("pragma foreign_keys=ON") + cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table';") + + data = { + "db_id": f, + "table_names_original": [], + "table_names": [], + "column_names_original": [(-1, "*")], + "column_names": [(-1, "*")], + "column_types": ["text"], + "primary_keys": [], + "foreign_keys": [], + } + + fk_holder = [] + for i, item in enumerate(cursor.fetchall()): + table_name = item[0] + data["table_names_original"].append(table_name) + data["table_names"].append(table_name.lower().replace("_", " ")) + fks = conn.execute( + "PRAGMA foreign_key_list('{}') ".format(table_name) + ).fetchall() + # print("db:{} table:{} fks:{}".format(f,table_name,fks)) + fk_holder.extend([[(table_name, fk[3]), (fk[2], fk[4])] for fk in fks]) + cur = conn.execute("PRAGMA table_info('{}') ".format(table_name)) + for j, col in enumerate(cur.fetchall()): + data["column_names_original"].append((i, col[1])) + data["column_names"].append((i, col[1].lower().replace("_", " "))) + # varchar, '' -> text, int, numeric -> integer, + col_type = col[2].lower() + if ( + "char" in col_type + or col_type == "" + or "text" in col_type + or "var" in col_type + ): + data["column_types"].append("text") + elif ( + "int" in col_type + or "numeric" in col_type + or "decimal" in col_type + or "number" in col_type + or "id" in col_type + or "real" in col_type + or "double" in col_type + or "float" in col_type + ): + data["column_types"].append("number") + elif "date" in col_type or "time" in col_type or "year" in col_type: + data["column_types"].append("time") + elif "boolean" in col_type: + data["column_types"].append("boolean") + else: + data["column_types"].append("others") + + if col[5] == 1: + data["primary_keys"].append(len(data["column_names"]) - 1) + + data["foreign_keys"] = fk_holder + data["foreign_keys"] = convert_fk_index(data) + + return data + + +def SpiderGenerator(data_filepaths: List[str], db_path: str): + schema_cache = dict() + for data_filepath in data_filepaths: + with open(data_filepath, encoding="utf-8") as f: + spider = 
json.load(f) + for sample in spider: + db_id = sample["db_id"] + if db_id not in schema_cache: + schema_cache[db_id] = dump_db_json_schema( + db=os.path.join(db_path, db_id, f"{db_id}.sqlite"), f=db_id + ) + schema = schema_cache[db_id] + yield { + "query": sample["query"], + "question": sample["question"], + "schema": schema, + "db_id": db_id, + } + + +def preprocess(args, split): + db_path = os.path.join(args.input_dir, "database") + out_db_path = os.path.join(args.output_dir, "database") + + if not os.path.exists(out_db_path): + shutil.copytree(db_path, out_db_path) + + if split == "train": + data_filepaths = [ + os.path.join(args.input_dir, "train_spider.json"), + os.path.join(args.input_dir, "train_others.json"), + ] + else: + data_filepaths = [os.path.join(args.input_dir, "dev.json")] + + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + processed_data = [] + + for example in SpiderGenerator(data_filepaths, db_path): + processed_data.append( + { + "turn": "single", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": example["question"], + "sql": example["query"], + } + ], + "knowledge": { + "type": "dict", + "value": { + "db_id": example["db_id"], + "schema": example["schema"], + "db_path": out_db_path, + }, + }, + } + ) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args, "train") + preprocess(args, "dev") + + shutil.copyfile( + os.path.join(args.input_dir, "tables.json"), + os.path.join(args.output_dir, "tables.json"), + ) diff --git a/src/preprocess/TOP.py b/src/preprocess/TOP.py new file mode 100644 index 0000000000000000000000000000000000000000..8d7a4a340bde2953aed0ff22eb63bcb874cc29da --- /dev/null +++ b/src/preprocess/TOP.py @@ -0,0 +1,36 @@ +from utils import write_jsonl_file, parse +import os + + +def preprocess(args, split): + processed_data = [] + + filename = f"{split}.tsv" if split != "dev" else "eval.tsv" + + with open(os.path.join(args.input_dir, filename), "r") as reader: + for line in reader: + text, _, label = line.strip().split("\t") + + processed_data.append( + { + "turn": "single", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": text, + "sql": label, + } + ], + } + ) + + write_jsonl_file(processed_data, os.path.join(args.output_dir, f"{split}.jsonl")) + + +if __name__ == "__main__": + args = parse() + + preprocess(args, "train") + preprocess(args, "dev") + preprocess(args, "test") diff --git a/src/preprocess/TaskMaster-1.py b/src/preprocess/TaskMaster-1.py new file mode 100644 index 0000000000000000000000000000000000000000..e2bbba551cccd28a26556130b4a4ea46dd2ea3db --- /dev/null +++ b/src/preprocess/TaskMaster-1.py @@ -0,0 +1,78 @@ +import os +from utils import read_json_file, write_jsonl_file, parse + + +def load_splits(args): + split_dir = os.path.join(args.input_dir, "train-dev-test") + splits = dict() + + for split in ["train", "dev", "test"]: + infile = os.path.join(split_dir, f"{split}.csv") + splits[split] = set() + + with open(infile, "r") as reader: + for line in reader: + dial_id = line.strip()[:-1] + + assert dial_id not in splits[split] + + splits[split].add(dial_id) + return splits + + +def get_split(splits, dial_id): + if dial_id in splits["train"]: + return "train" + if dial_id in splits["dev"]: + return "dev" + if dial_id in splits["test"]: + return "test" + + raise ValueError() + + +def parse_slots_to_fill(segments): + # svt = [] + # for segment in segments: + # svt.append({ + # "slot" + # }) + # TODO + pass + + +def preprocess(args): 
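+    # self-dialogs.json holds every conversation; the csv files under
+    # train-dev-test only list conversation ids, so get_split() routes each
+    # dialogue to its split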
+ splits = load_splits(args) + + data = read_json_file(os.path.join(args.input_dir, "self-dialogs.json")) + processed_data = {split: [] for split in ["train", "dev", "test"]} + for dial in data: + dial_id = dial["conversation_id"] + split = get_split(splits, dial_id) + + dialog = {"turn": "single", "locale": "en", "dialog": []} + for utter in dial["utterances"]: + role = utter["speaker"] + assert role in ["USER", "ASSISTANT"] + if role == "ASSISTANT": + role = "SYSTEM" + + turn = { + "roles": [role], + "utterance": utter["text"], + } + + if "segments" in utter: + turn["slots_to_fill"] = parse_slots_to_fill(utter["segments"]) + dialog["dialog"].append(turn) + + processed_data[split].append(dialog) + + for split in processed_data: + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + write_jsonl_file(processed_data[split], outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/TaskMaster-3.py b/src/preprocess/TaskMaster-3.py new file mode 100644 index 0000000000000000000000000000000000000000..4e7fcce8dbc438a26ecee05c1a89f5873069fd96 --- /dev/null +++ b/src/preprocess/TaskMaster-3.py @@ -0,0 +1,74 @@ +import os +from utils import read_json_file, write_jsonl_file, parse + + +def load_splits(args): + split_dir = os.path.join(args.input_dir, "splits") + splits = dict() + + for split in ["train", "dev", "test"]: + indir = os.path.join(split_dir, split) + splits[split] = set() + + for filename in os.listdir(indir): + if not filename.startswith(split): + continue + infile = os.path.join(indir, filename) + with open(infile, "r", encoding="utf-8") as reader: + for line in reader: + dial_id = line.strip().split()[-1] + + # assert dial_id not in splits[split] + + splits[split].add(dial_id) + return splits + + +def get_split(splits, dial_id): + if dial_id in splits["train"]: + return "train" + if dial_id in splits["dev"]: + return "dev" + if dial_id in splits["test"]: + return "test" + + raise ValueError() + + +def parse_slots_to_fill(segments): + pass + + +def preprocess(args): + splits = load_splits(args) + + data_dir = os.path.join(args.input_dir, "data") + processed_data = {split: [] for split in ["train", "dev", "test"]} + + for filename in os.listdir(data_dir): + data = read_json_file(os.path.join(data_dir, filename)) + for dial in data: + dial_id = dial["conversation_id"] + split = get_split(splits, dial_id) + + dialog = {"turn": "single", "locale": "en", "dialog": []} + for utter in dial["utterances"]: + turn = { + "roles": [utter["speaker"]], + "utterance": utter["text"], + } + + if "segments" in utter: + turn["slots_to_fill"] = parse_slots_to_fill(utter["segments"]) + dialog["dialog"].append(turn) + + processed_data[split].append(dialog) + + for split in processed_data: + outfile = os.path.join(args.output_dir, f"{split}.jsonl") + write_jsonl_file(processed_data[split], outfile) + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/Twitter.py b/src/preprocess/Twitter.py new file mode 100644 index 0000000000000000000000000000000000000000..020cff98b6e0efdc16a888e2de55e5e05b5bb996 --- /dev/null +++ b/src/preprocess/Twitter.py @@ -0,0 +1,59 @@ +from utils import parse, write_jsonl_file +import os + +sent_map = { + "0": "neutral", + "1": "positive", + "-1": "negative", +} + + +def preprocess(args, split): + dialogues = [] + + with open(os.path.join(args.input_dir, f"{split}.raw"), "r") as reader: + while True: + text = reader.readline().strip() + + if not text: + break + + target = 
reader.readline().strip() + sentiment = sent_map[reader.readline().strip()] + + target_start = text.index("$T$") + target_end = target_start + len(target) + + text = text.replace("$T$", target) + + dialogues.append( + { + "turn": "single", + "locale": "en", + "dialog": [ + { + "roles": ["USER"], + "utterance": text, + "aspects": [ + { + "target": { + "value": target, + "start": target_start, + "end": target_end, + }, + "sentiment": sentiment, + } + ], + } + ], + } + ) + + write_jsonl_file(dialogues, os.path.join(args.output_dir, f"{split}.jsonl")) + + +if __name__ == "__main__": + args = parse() + + preprocess(args, "train") + preprocess(args, "test") diff --git a/src/preprocess/XPersona.py b/src/preprocess/XPersona.py new file mode 100644 index 0000000000000000000000000000000000000000..ad6771b69b6fa17b2b11593379e48813963fea4d --- /dev/null +++ b/src/preprocess/XPersona.py @@ -0,0 +1,53 @@ +import os +import re +from utils import read_json_file, write_jsonl_file, parse + +def preprocess(args): + filenames = [filename for filename in os.listdir(args.input_dir) if not os.path.isdir(os.path.join(args.input_dir, filename))] + for filename in filenames: # for all three types of files + ''' + add train/eval/test instruction + ''' + path = os.path.join(args.input_dir, filename) + data = read_json_file(path) + locale = filename[:2] + turns = [] + for item in data: + t = { + "turn": "multi", + "locale": locale, + "dialog": [], + "knowledge": { + "type": "persona", + "value": [] + } + } + + for dial in item["dialogue"]: + d1 = { + "role": "ROLE1", + "utterance": dial[0] + } + d2 = { + "role": "ROLE2", + "utterance": dial[1] + } + + t["dialog"].append(d1) + t["dialog"].append(d2) + + t["knowledge"]["value"].append( + { + "role": "ROLE2", + "description": item["persona"] + } + ) + + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + filename[:2] + '_' + re.search('test|train|valid', filename).group() + '.jsonl') + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/__pycache__/Soccer.cpython-38.pyc b/src/preprocess/__pycache__/Soccer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6230fbbae753b09430d5f16258bc51ec4f00fbfa Binary files /dev/null and b/src/preprocess/__pycache__/Soccer.cpython-38.pyc differ diff --git a/src/preprocess/__pycache__/Spider.cpython-38.pyc b/src/preprocess/__pycache__/Spider.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4120ecadc6a17be7c9925fc0c770973f9a075b7 Binary files /dev/null and b/src/preprocess/__pycache__/Spider.cpython-38.pyc differ diff --git a/src/preprocess/__pycache__/utils.cpython-312.pyc b/src/preprocess/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71528b0a294acd37fd18f85c01c16a6d8d2d29d1 Binary files /dev/null and b/src/preprocess/__pycache__/utils.cpython-312.pyc differ diff --git a/src/preprocess/__pycache__/utils.cpython-38.pyc b/src/preprocess/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5697eb69f2e85b23ff317e928207403915a99584 Binary files /dev/null and b/src/preprocess/__pycache__/utils.cpython-38.pyc differ diff --git a/src/preprocess/corpus.py b/src/preprocess/corpus.py new file mode 100644 index 0000000000000000000000000000000000000000..8454dfee119418fec0f4465f61ffab6107036d07 --- /dev/null +++ b/src/preprocess/corpus.py @@ -0,0 +1,42 @@ +import re +from utils import read_json_file, read_jsonl_file, 
write_json_file, write_jsonl_file, parse + + +def reformat(args, file): + path = args.input_dir + "/" + file + ".json" + data = read_json_file(path) + turns = [] + for i in range(len(data)): + t = {"turn": "multi", "dialog": []} + dialogues = re.split('\\n|\\r\\n', data[i]["dialogue"]) + for dialog in dialogues: + d = {} + ru = re.split(': ', dialog) + if len(ru) < 2: + continue + d["role"] = ru[0] + d["utterance"] = ru[1] + d["slot_value_table"] = [] + d["summary"] = data[i]["summary"] + d["intent"] = None + d["answer"] = None + d["locale"] = None + d["scenario"] = None + t["dialog"].append(d) + + t["goal"] = None + t["knowledge"] = None + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess(args): + reformat(args, "train") + reformat(args, "val") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/csqa2.py b/src/preprocess/csqa2.py new file mode 100644 index 0000000000000000000000000000000000000000..c355c452a0c8ab18568107b5f42a3515aa64ccb4 --- /dev/null +++ b/src/preprocess/csqa2.py @@ -0,0 +1,58 @@ +import re +from utils import read_json_file, read_jsonl_file, write_json_file, write_jsonl_file, parse + + +def preprocess_for_train_and_dev(args, file): + path = args.input_dir + "/" + file + ".json" + data = read_jsonl_file(path) + turns = [] + for i in range(len(data)): + t = { + "turn": "single", + "domain": [data[i]["topic_pormpt"]], + "locale": "en", + "dialog": [] + } + d = { + "role": "question", + "utterance": data[i]["question"], + "class_label": data[i]["answer"] == "yes" + } + + t["dialog"].append(d) + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess_for_test(args, file): + path = args.input_dir + "/" + file + ".json" + data = read_jsonl_file(path) + turns = [] + for i in range(len(data)): + t = { + "turn": "single", + "domain": [data[i]["topic_pormpt"]], + "locale": "en", + "dialog": [] + } + d = { + "role": "question", + "utterance": data[i]["question"] + } + + t["dialog"].append(d) + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess(args): + preprocess_for_train_and_dev(args, "CSQA2_train") + preprocess_for_train_and_dev(args, "CSQA2_dev") + preprocess_for_test(args, "CSQA2_test_no_answers") + + +if __name__ == "__main__": + args = parse() + preprocess(args) \ No newline at end of file diff --git a/src/preprocess/dialogre.py b/src/preprocess/dialogre.py new file mode 100644 index 0000000000000000000000000000000000000000..f7e2d260a3c75ccc93d9c4ba0bf7c6d3197204e1 --- /dev/null +++ b/src/preprocess/dialogre.py @@ -0,0 +1,53 @@ +import re +from utils import read_json_file, read_jsonl_file, write_json_file, write_jsonl_file, parse + + +def reformat(args, file): + path = args.input_dir + "/" + file + ".json" + data = read_json_file(path) + locale = "cn" if "cn/" in path else "en" + turns = [] + for turn_ in data: + turn = turn_[0] + t = { + "turn": "multi", + "locale": locale, + "dialog": [], + "instance_relations": [] + } + for tt in turn: + d = { + "role": tt[:9], + "utterance": tt[11:] + } + t["dialog"].append(d) + + relations = turn_[1] + for relation in relations: + inst = { + "instance1": relation["x"], + "instance2": relation["y"], + "relations": [] + } + for i in range(len(relation["r"])): + r = { + "relation": relation["r"][i], + "trigger": relation["t"][i] + } + inst["relations"].append(r) + t["instance_relations"].append(inst) + + 
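+        # one record per dialogue: the raw turns plus all annotated entity-pair relations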
turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess(args): + reformat(args, "train") + reformat(args, "dev") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/reccon.py b/src/preprocess/reccon.py new file mode 100644 index 0000000000000000000000000000000000000000..236399fcf37e0b6f2220685a3334e983c58fe820 --- /dev/null +++ b/src/preprocess/reccon.py @@ -0,0 +1,79 @@ +from utils import read_json_file, write_jsonl_file, parse +import os + + +def get_knowledge(input_dir): + path = os.path.join(input_dir, "dailydialog_train.json") + data = read_json_file(path) + + emotions = set() + for part in data.values(): + for origin_dail in part: + for origin_turn in origin_dail: + emotions.add(origin_turn["emotion"]) + + return sorted(emotions) + + +def reformat(args, file, knowledge): + path = os.path.join(args.input_dir, f"dailydialog_{file}.json") + data = read_json_file(path) + if file == "valid": + file = "dev" + outfile = os.path.join(args.output_dir, f"{file}.jsonl") + + processed_data = [] + for part in data.values(): + for origin_dail in part: + dial = { + "turn": "multi", + "locale": "en", + "dialog": [], + "knowledge": {"type": "list", "value": knowledge}, + } + + for origin_turn in origin_dail: + turn = { + "roles": [origin_turn["speaker"]], + "utterance": origin_turn["utterance"], + "emotions": [{"emotion": origin_turn["emotion"]}], + } + + if "expanded emotion cause evidence" in origin_turn: + turn["emotions"][0]["evidences"] = [] + for idx, turn_idx in enumerate( + origin_turn["expanded emotion cause evidence"] + ): + # NOTE: turn is 0-index + if turn_idx == "b": + # start + turn_idx = -1 + turn["emotions"][0]["evidences"].append({"turn": turn_idx}) + else: + turn_idx -= 1 + span = origin_turn["expanded emotion cause span"][idx] + start = origin_dail[turn_idx]["utterance"].index(span) + end = start + len(span) + turn["emotions"][0]["evidences"].append( + { + "turn": turn_idx, + "span": span, + "start": start, + "end": end, + } + ) + turn["emotions"][0]["evidence_types"] = origin_turn["type"] + + dial["dialog"].append(turn) + + processed_data.append(dial) + + write_jsonl_file(processed_data, outfile) + + +if __name__ == "__main__": + args = parse() + knowledge = get_knowledge(args.input_dir) + reformat(args, "train", knowledge) + reformat(args, "valid", knowledge) + reformat(args, "test", knowledge) diff --git a/src/preprocess/sample.py b/src/preprocess/sample.py new file mode 100644 index 0000000000000000000000000000000000000000..b28a0113c1e392b5015afbe02b88ac565343be5d --- /dev/null +++ b/src/preprocess/sample.py @@ -0,0 +1,42 @@ +import argparse + + +def parse(): + """ + Arguments parser. + """ + parser = argparse.ArgumentParser("==== XXX Dataset Preprocessor ====") + + parser.add_argument("--input_dir", type=str, help="input directory of the dataset.") + parser.add_argument( + "--output_dir", type=str, help="output directory of the dataset." + ) + + return parser.parse_args() + + +# --- SOME HELPER FUNCTIONS HERE --- # +""" +FOR EXAMPLE + +def preprocess_for_train_and_dev(args): + ... + +def preprocess_for_test(args): + ... + +""" +# ... # +# ---------------------------------- # + + +def preprocess(args): + # TODO + # preprocess for train & dev... + # preprocess for test... 
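+    # a hypothetical sketch (convert_to_unified is a placeholder; read_json_file,
+    # write_jsonl_file and os would come from the shared utils module / stdlib):
+    #   data = read_json_file(os.path.join(args.input_dir, "train.json"))
+    #   write_jsonl_file(convert_to_unified(data), os.path.join(args.output_dir, "train.jsonl"))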
+ pass + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/sentihood.py b/src/preprocess/sentihood.py new file mode 100644 index 0000000000000000000000000000000000000000..90b48517cb55efd87cffcbfff2fc7cd98d57a05e --- /dev/null +++ b/src/preprocess/sentihood.py @@ -0,0 +1,35 @@ +import os +from utils import read_json_file, write_jsonl_file, parse + + +def reformat(args, file): + path = os.path.join(args.input_dir, f"sentihood-{file}.json") + data = read_json_file(path) + turns = [] + for i in range(len(data)): + t = {"turn": "single", "locale": "en", "dialog": []} + d = {"roles": ["USER"] + , "utterance": data[i]["text"], "aspects": []} + for o in data[i]["opinions"]: + aspect = { + "target": {"value": o["target_entity"]}, + "category": o["aspect"], + "sentiment": o["sentiment"], + } + d["aspects"].append(aspect) + t["dialog"].append(d) + + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess(args): + reformat(args, "train") + reformat(args, "dev") + reformat(args, "test") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/sfsnips.py b/src/preprocess/sfsnips.py new file mode 100644 index 0000000000000000000000000000000000000000..5d723df1e49e242b1f605fdc46febd3ab69e622c --- /dev/null +++ b/src/preprocess/sfsnips.py @@ -0,0 +1,60 @@ +import re +from utils import read_json_file, read_jsonl_file, write_json_file, write_jsonl_file, parse + + +''' +{'domains':[{'description': str, + '@type':'domain', + 'intents': list[{'description': str, + 'benchmark':, + 'queries': [{'text': str, + 'results_per_service': {',': {'slots': ['name': str, 'matching_slots': ['slots': str, 'services': str] + 'value': str]}}}], + 'slots':, + '@type': 'intent', + 'name': str}], + 'name': str}], +'version':'0.0.1'} +''' + + +def reformat(args, file): + path = args.input_dir + "/" + file + ".json" + data = read_json_file(path)["domains"] + turns = [] + for i in range(len(data)): + for j in range(len(data[i]['intents'])): + for k in range(len(data[i]['intents'][j]['queries'])): + t = {"turn": "single", "dialog": []} + d = {"role": "ROLE", "utterance": data[i]['intents'][j]['queries'][k]['text']} + d["belief_state"] = [{"domain": None, + "goal": [{"intent": None, + "slot_value_table": []} + ]} + ] + d["querying_result"] = None + d["summary"] = None + d["locale"] = None + d["topic"] = None + d["opinions"] = None + d["answer"] = None + for tt in data[i]['intents'][j]['queries'][k]['results_per_service'].values(): + for s in tt['slots']: + value = s['value'] + for ss in s['matching_slots']: + slot = ss['slot'] + d["belief_state"][0]["goal"][0]['slot_value_table'].append({'slot': slot, 'value': value}) + t["dialog"].append(d) + t["knowledge"] = None + turns.append(t) + + write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl") + + +def preprocess(args): + reformat(args, "benchmark_data") + + +if __name__ == "__main__": + args = parse() + preprocess(args) diff --git a/src/preprocess/utils.py b/src/preprocess/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..17cb6037d136c55a1c8a399bd6f42772e205cec4 --- /dev/null +++ b/src/preprocess/utils.py @@ -0,0 +1,107 @@ +import argparse +import json +import pandas as pd +import os + +choices = [ + "First", + "Second", + "Third", + "Fourth", + "Fifth", + "Sixth", + "Seventh", + "Eighth", + "Ninth", + "Tenth", + "Eleventh", + "Twelfth", + "Thirteenth", + "Fourteenth", + "Fifteenth", + "Sixteenth", + "Seventeenth", + 
"Eighteenth", + "Nineteenth", + "Twentieth", +] + + +def read_json_file(filename, encoding="UTF-8"): + with open(filename, "r", encoding=encoding) as fr: + return json.load(fr) + + +def read_jsonl_file(filename): + with open(filename, "r", encoding="utf8") as fr: + return [json.loads(line) for line in fr] + + +def write_json_file(data, filename, encoding="utf8"): + with open(filename, "w", encoding=encoding) as fw: + json.dump(data, fw, ensure_ascii=False, indent=4) + + +def write_jsonl_file(data, filename, encoding="utf8"): + dirname = os.path.dirname(filename) + if not os.path.exists(dirname): + os.makedirs(dirname) + + with open(filename, "w", encoding=encoding) as fw: + for d in data: + fw.write(json.dumps(d, ensure_ascii=False) + "\n") + + +def read_normal_file(filename): + with open(filename, "r", encoding="utf8") as fr: + return fr.readlines() + + +def read_line_labels(filename): + with open(filename, "r", encoding="UTF-8") as reader: + return reader.read().strip().split("\n") + + +def read_csv_file(filename, no_header=False): + try: + with open(filename, "r", encoding="utf8") as fr: + if no_header: + return pd.read_csv(fr, header=None) + return pd.read_csv(fr) + except: + with open(filename, "r") as fr: + cnt = 0 + for line in fr: + cnt += 1 + print(cnt, line) + + +def read_tsv_file(filename): + with open(filename, "r", encoding="utf8") as fr: + return pd.read_csv(fr, sep="\t") + + +def parse(): + """ + Arguments parser. + """ + parser = argparse.ArgumentParser("==== XXX Dataset Preprocessor ====") + + parser.add_argument("--input_dir", type=str, help="input directory of the dataset.") + parser.add_argument( + "--output_dir", type=str, help="output directory of the dataset." + ) + parser.add_argument( + "--domain", + type=str, + default=None, + help="domain of the dataset. (default: None)", + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="random seed for data shuffling. (default: 42)", + ) + + return parser.parse_args() diff --git a/src/scripts/convert_to_seq.sh b/src/scripts/convert_to_seq.sh new file mode 100644 index 0000000000000000000000000000000000000000..cb23cb1215897bb00e02dbb859a6be89c34160d6 --- /dev/null +++ b/src/scripts/convert_to_seq.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +SOURCE_DIR=../../data/unified +TARGET_DIR=../../data/seq + +if [ ! 
-d "${TARGET_DIR}" ]; then + mkdir -p ${TARGET_DIR} +fi + +# DSTC 2 +python DST.py ${SOURCE_DIR}/DSTC2 ${TARGET_DIR}/DST-DSTC2 +# sim-M +python DST.py ${SOURCE_DIR}/sim-M ${TARGET_DIR}/DST-sim-M +# sim-R +python DST.py ${SOURCE_DIR}/sim-R ${TARGET_DIR}/DST-sim-R +# MultiWOZ 2.2 +python DST.py ${SOURCE_DIR}/MultiWOZ_2.2 ${TARGET_DIR}/DST-MultiWOZ_2.2 +# SGD +python DST.py ${SOURCE_DIR}/SGD ${TARGET_DIR}/DST-SGD +# MultiWOZ 2.1 +python DST.py ${SOURCE_DIR}/MultiWOZ_2.1 ${TARGET_DIR}/DST-MultiWOZ_2.1 + + +# Spider +python T2S.py ${SOURCE_DIR}/Spider ${TARGET_DIR}/T2S-Spider +# SParC +python T2S.py ${SOURCE_DIR}/SParC ${TARGET_DIR}/T2S-SParC +# CoSQL +python T2S.py ${SOURCE_DIR}/CoSQL ${TARGET_DIR}/T2S-CoSQL + + +# MultiDoGo +for domain in `ls ${SOURCE_DIR}/MultiDoGo`; do + python SF.py ${SOURCE_DIR}/MultiDoGo/${domain} ${TARGET_DIR}/SF-MultiDoGo-${domain} +done +# Restaurant8k +python SF.py ${SOURCE_DIR}/Restaurant8k ${TARGET_DIR}/SF-Restaurant8k +# SNIPS +for domain in `ls ${SOURCE_DIR}/SNIPS`; do + python SF.py ${SOURCE_DIR}/SNIPS/${domain} ${TARGET_DIR}/SF-SNIPS-${domain} +done + + +# DDRel +python RRR.py ${SOURCE_DIR}/DDRel ${TARGET_DIR}/RRR-DDRel + + +# CamRest676 +python QCR.py ${SOURCE_DIR}/CamRest676 ${TARGET_DIR}/QCR-CamRest676 +# CANARD +python QCR.py ${SOURCE_DIR}/CANARD ${TARGET_DIR}/QCR-CANARD "knowledge" + + +# AlphaNLI +python NLI.py ${SOURCE_DIR}/AlphaNLI ${TARGET_DIR}/NLI-AlphaNLI + + +# CoQA +python MRC.py ${SOURCE_DIR}/CoQA ${TARGET_DIR}/MRC-CoQA +# DoQA +python MRC.py ${SOURCE_DIR}/DoQA ${TARGET_DIR}/MRC-DoQA +# FriendsQA +python MRC.py ${SOURCE_DIR}/FriendsQA ${TARGET_DIR}/MRC-FriendsQA +# Molweni +python MRC.py ${SOURCE_DIR}/Molweni ${TARGET_DIR}/MRC-Molweni +# QuAC +python MRC.py ${SOURCE_DIR}/QuAC ${TARGET_DIR}/MRC-QuAC +# SQuAD 2.0 +python MRC.py ${SOURCE_DIR}/SQuAD_2.0 ${TARGET_DIR}/MRC-SQuAD_2.0 + + +# CommonsenseQA +python MCQA.py ${SOURCE_DIR}/CommonsenseQA ${TARGET_DIR}/MCQA-CommonsenseQA +# CommonsenseQA 2.0 +python MCQA.py ${SOURCE_DIR}/CommonsenseQA_2.0 ${TARGET_DIR}/MCQA-CommonsenseQA_2.0 +# CosmosQA +python MCQA.py ${SOURCE_DIR}/CosmosQA ${TARGET_DIR}/MCQA-CosmosQA +# DREAM +python MCQA.py ${SOURCE_DIR}/DREAM ${TARGET_DIR}/MCQA-DREAM +# MuTual +python MCQA.py ${SOURCE_DIR}/MuTual/mutual ${TARGET_DIR}/MCQA-Mutual +# MuTual-plus +python MCQA.py ${SOURCE_DIR}/MuTual/mutual_plus ${TARGET_DIR}/MCQA-Mutual-plus +# RACE +python MCQA.py ${SOURCE_DIR}/RACE/high ${TARGET_DIR}/MCQA-RACE-high +python MCQA.py ${SOURCE_DIR}/RACE/middle ${TARGET_DIR}/MCQA-RACE-middle +# PCMD +python MCQA.py ${SOURCE_DIR}/PCMD ${TARGET_DIR}/MCQA-PCMD +# SocialIQA +python MCQA.py ${SOURCE_DIR}/SocialIQA ${TARGET_DIR}/MCQA-SocialIQA + + +# DialogSum +python DS.py ${SOURCE_DIR}/DialogSum ${TARGET_DIR}/DS-DialogSum +# SAMSum +python DS.py ${SOURCE_DIR}/SAMSum ${TARGET_DIR}/DS-SAMSum + + +# CMUDoG +python DCRG.py ${SOURCE_DIR}/CMUDoG ${TARGET_DIR}/DCRG-CMUDoG "turn-document" +# CommonsenseDialog +python DCRG.py ${SOURCE_DIR}/CommonsenseDialog ${TARGET_DIR}/DCRG-CommonsenseDialog "document" +# EmpathicDialogue +python DCRG.py ${SOURCE_DIR}/EmpathicDialogue ${TARGET_DIR}/DCRG-EmpathicDialogue "document" +# NarrativeQA +python DCRG.py ${SOURCE_DIR}/NarrativeQA ${TARGET_DIR}/DCRG-NarrativeQA "document" multi-ref +# Soccer +python DCRG.py ${SOURCE_DIR}/Soccer ${TARGET_DIR}/DCRG-Soccer "kg" +# Incar +python DCRG.py ${SOURCE_DIR}/Incar ${TARGET_DIR}/DCRG-Incar "kg" +# CornellMovie +python DCRG.py ${SOURCE_DIR}/CornellMovie ${TARGET_DIR}/DCRG-CornellMovie "None" + + +# DailyDialog +python ER.py 
${SOURCE_DIR}/DailyDialog ${TARGET_DIR}/ER-DailyDialog +# EmoryNLP +python ER.py ${SOURCE_DIR}/EmoryNLP ${TARGET_DIR}/ER-EmoryNLP +# GoEmotions +python ER.py ${SOURCE_DIR}/GoEmotions ${TARGET_DIR}/ER-GoEmotions +# MELD +python ER.py ${SOURCE_DIR}/MELD ${TARGET_DIR}/ER-MELD +# IEMOCAP +python ER.py ${SOURCE_DIR}/IEMOCAP ${TARGET_DIR}/ER-IEMOCAP + + +# Banking77 +python ID.py ${SOURCE_DIR}/Banking77 ${TARGET_DIR}/ID-Banking77 +# CLINC150 +python ID.py ${SOURCE_DIR}/CLINC150 ${TARGET_DIR}/ID-CLINC150 +# HWU64 +python ID.py ${SOURCE_DIR}/HWU64 ${TARGET_DIR}/ID-HWU64 + + +# E2E +python DT.py ${SOURCE_DIR}/E2E ${TARGET_DIR}/DT-E2E +# RNNLG +for domain in `ls ${SOURCE_DIR}/RNNLG`; do + python DT.py ${SOURCE_DIR}/RNNLG/${domain} ${TARGET_DIR}/DT-RNNLG-${domain} +done + + +# PERSONA-CHAT +for dataset in `ls ${SOURCE_DIR}/PERSONA-CHAT`; do + python CC.py ${SOURCE_DIR}/PERSONA-CHAT/${dataset} ${TARGET_DIR}/CC-PERSONA-CHAT-${dataset} +done + + +# ENLP +python CI.py ${SOURCE_DIR}/ENLP ${TARGET_DIR}/CI-ENLP + + +# ASTE +for domain in `ls ${SOURCE_DIR}/ASTE`; do + python ABSA.py ${SOURCE_DIR}/ASTE/${domain} ${TARGET_DIR}/ABSA-ASTE-${domain} +done +# MAMS-ACSA +python ABSA.py ${SOURCE_DIR}/MAMS-ACSA ${TARGET_DIR}/ABSA-MAMS-ACSA +# MAMS-ATSA +python ABSA.py ${SOURCE_DIR}/MAMS-ATSA ${TARGET_DIR}/ABSA-MAMS-ATSA +# Twitter +python ABSA.py ${SOURCE_DIR}/Twitter ${TARGET_DIR}/ABSA-Twitter \ No newline at end of file diff --git a/src/scripts/convert_to_seq_dialoglue.sh b/src/scripts/convert_to_seq_dialoglue.sh new file mode 100644 index 0000000000000000000000000000000000000000..1dd12a764680208cbb0afa5c77615f460b5a6955 --- /dev/null +++ b/src/scripts/convert_to_seq_dialoglue.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +SOURCE_DIR=../../data/unified_dialoglue +TARGET_DIR=../../data/seq_dialoglue + +if [ ! -d "${TARGET_DIR}" ]; then + mkdir -p ${TARGET_DIR} +fi + +# # Banking77 +# python ID.py ${SOURCE_DIR}/Banking77 ${TARGET_DIR}/ID-Banking77 +# # CLINC150 +# python ID.py ${SOURCE_DIR}/CLINC150 ${TARGET_DIR}/ID-CLINC150 +# # HWU64 +# python ID.py ${SOURCE_DIR}/HWU64 ${TARGET_DIR}/ID-HWU64 + +# # Restaurant8k +# python SF.py ${SOURCE_DIR}/Restaurant8k ${TARGET_DIR}/SF-Restaurant8k + +# # DSTC 8 +# python DST.py ${SOURCE_DIR}/DSTC8 ${TARGET_DIR}/DST-DSTC8 + +# TOP +python SP.py ${SOURCE_DIR}/TOP ${TARGET_DIR}/SP-TOP \ No newline at end of file diff --git a/src/scripts/convert_to_unified.sh b/src/scripts/convert_to_unified.sh new file mode 100644 index 0000000000000000000000000000000000000000..906c88b849efad920df4c4b14656c3a685d6fe58 --- /dev/null +++ b/src/scripts/convert_to_unified.sh @@ -0,0 +1,450 @@ +#!/bin/bash + +TARGET_DIR=../../data/unified + +if [ ! -d "${TARGET_DIR}" ]; then + mkdir -p ${TARGET_DIR} +fi + + +# DSTC 2 +if [ ! -d "${TARGET_DIR}/DSTC2" ]; then + echo "Converting DSTC2 to unified format" + python preprocess/DSTC2.py \ + --input_dir ../../data/origin/DSTC2 \ + --output_dir ${TARGET_DIR}/DSTC2 +fi + +# sim-M +if [ ! -d "${TARGET_DIR}/sim-M" ]; then + echo "Converting sim-M to unified format" + python preprocess/Google_Simulated_Dialogue.py \ + --input_dir ../../data/origin/sim-M \ + --output_dir ${TARGET_DIR}/sim-M \ + --domain "movie" +fi + +# sim-R +if [ ! -d "${TARGET_DIR}/sim-R" ]; then + echo "Converting sim-R to unified format" + python preprocess/Google_Simulated_Dialogue.py \ + --input_dir ../../data/origin/sim-R \ + --output_dir ${TARGET_DIR}/sim-R \ + --domain "restaurant" +fi + +# MultiWOZ 2.2 +if [ ! 
-d "${TARGET_DIR}/MultiWOZ_2.2" ]; then + echo "Converting MultiWOZ_2.2 to unified format" + python preprocess/MultiWOZ_2.2.py \ + --input_dir ../../data/origin/MultiWOZ_2.2 \ + --output_dir ${TARGET_DIR}/MultiWOZ_2.2 +fi + +# SGD +if [ ! -d "${TARGET_DIR}/SGD" ]; then + echo "Converting SGD to unified format" + python preprocess/SGD.py \ + --input_dir ../../data/origin/SGD \ + --output_dir ${TARGET_DIR}/SGD +fi + +# MultiWOZ 2.1 +if [ ! -d "${TARGET_DIR}/MultiWOZ_2.1" ]; then + echo "Converting MultiWOZ_2.1 to unified format" + python preprocess/MultiWOZ_2.1.py \ + --input_dir ../../data/origin/MultiWOZ_2.1 \ + --output_dir ${TARGET_DIR}/MultiWOZ_2.1 +fi + +# Spider +if [ ! -d "${TARGET_DIR}/Spider" ]; then + echo "Converting Spider to unified format" + python preprocess/Spider.py \ + --input_dir ../../data/origin/Spider \ + --output_dir ${TARGET_DIR}/Spider +fi + +# SParC +if [ ! -d "${TARGET_DIR}/SParC" ]; then + echo "Converting SParC to unified format" + python preprocess/SParC.py \ + --input_dir ../../data/origin/SParC/sparc \ + --output_dir ${TARGET_DIR}/SParC +fi + +# CoSQL +if [ ! -d "${TARGET_DIR}/CoSQL" ]; then + echo "Converting CoSQL to unified format" + python preprocess/CoSQL.py \ + --input_dir ../../data/origin/CoSQL/cosql_dataset \ + --output_dir ${TARGET_DIR}/CoSQL +fi + +# MultiDoGo +if [ ! -d "${TARGET_DIR}/MultiDoGo" ]; then + echo "Converting MultiDoGo to unified format" + python preprocess/MultiDoGo.py \ + --input_dir ../../data/origin/MultiDoGo \ + --output_dir ${TARGET_DIR}/MultiDoGo +fi + +# Restaurant8k +if [ ! -d "${TARGET_DIR}/Restaurant8k" ]; then + echo "Converting Restaurant8k to unified format" + python preprocess/Restaurant8k.py \ + --input_dir ../../data/origin/Restaurant8k \ + --output_dir ${TARGET_DIR}/Restaurant8k +fi + +# SNIPS +if [ ! -d "${TARGET_DIR}/SNIPS" ]; then + echo "Converting SNIPS to unified format" + python preprocess/SNIPS.py \ + --input_dir ../../data/origin/SNIPS \ + --output_dir ${TARGET_DIR}/SNIPS +fi + +# DDRel +if [ ! -d "${TARGET_DIR}/DDRel" ]; then + echo "Converting DDRel to unified format" + python preprocess/DDRel.py \ + --input_dir ../../data/origin/DDRel \ + --output_dir ${TARGET_DIR}/DDRel +fi + +# CamRest676 +if [ ! -d "${TARGET_DIR}/CamRest676" ]; then + echo "Converting CamRest676 to unified format" + python preprocess/CamRest676.py \ + --input_dir ../../data/origin/CamRest676 \ + --output_dir ${TARGET_DIR}/CamRest676 +fi + +# CANARD +if [ ! -d "${TARGET_DIR}/CANARD" ]; then + echo "Converting CANARD to unified format" + python preprocess/CANARD.py \ + --input_dir ../../data/origin/CANARD \ + --output_dir ${TARGET_DIR}/CANARD +fi + +# AlphaNLI +if [ ! -d "${TARGET_DIR}/AlphaNLI" ]; then + echo "Converting AlphaNLI to unified format" + python preprocess/AlphaNLI.py \ + --input_dir ../../data/origin/AlphaNLI \ + --output_dir ${TARGET_DIR}/AlphaNLI +fi + +# CoQA +if [ ! -d "${TARGET_DIR}/CoQA" ]; then + echo "Converting CoQA to unified format" + python preprocess/CoQA.py \ + --input_dir ../../data/origin/CoQA \ + --output_dir ${TARGET_DIR}/CoQA +fi + +# DoQA +if [ ! -d "${TARGET_DIR}/DoQA" ]; then + echo "Converting DoQA to unified format" + python preprocess/DoQA.py \ + --input_dir ../../data/origin/DoQA \ + --output_dir ${TARGET_DIR}/DoQA +fi + +# FriendsQA +if [ ! -d "${TARGET_DIR}/FriendsQA" ]; then + echo "Converting FriendsQA to unified format" + python preprocess/FriendsQA.py \ + --input_dir ../../data/origin/FriendsQA \ + --output_dir ${TARGET_DIR}/FriendsQA +fi + +# Molweni +if [ ! 
-d "${TARGET_DIR}/Molweni" ]; then + echo "Converting Molweni to unified format" + python preprocess/Molweni.py \ + --input_dir ../../data/origin/Molweni \ + --output_dir ${TARGET_DIR}/Molweni +fi + +# QuAC +if [ ! -d "${TARGET_DIR}/QuAC" ]; then + echo "Converting QuAC to unified format" + python preprocess/QuAC.py \ + --input_dir ../../data/origin/QuAC \ + --output_dir ${TARGET_DIR}/QuAC +fi + +# SQuAD 2.0 +if [ ! -d "${TARGET_DIR}/SQuAD_2.0" ]; then + echo "Converting SQuAD 2.0 to unified format" + python preprocess/SQuAD_2.0.py \ + --input_dir ../../data/origin/SQuAD_2.0 \ + --output_dir ${TARGET_DIR}/SQuAD_2.0 +fi + +# CommonsenseQA +if [ ! -d "${TARGET_DIR}/CommonsenseQA" ]; then + echo "Converting CommonsenseQA to unified format" + python preprocess/CommonsenseQA.py \ + --input_dir ../../data/origin/CommonsenseQA \ + --output_dir ${TARGET_DIR}/CommonsenseQA +fi + +# CommonsenseQA 2.0 +if [ ! -d "${TARGET_DIR}/CommonsenseQA_2.0" ]; then + echo "Converting CommonsenseQA 2.0 to unified format" + python preprocess/CommonsenseQA_2.0.py \ + --input_dir ../../data/origin/CommonsenseQA_2.0 \ + --output_dir ${TARGET_DIR}/CommonsenseQA_2.0 +fi + +# CosmosQA +if [ ! -d "${TARGET_DIR}/CosmosQA" ]; then + echo "Converting CosmosQA to unified format" + python preprocess/CosmosQA.py \ + --input_dir ../../data/origin/CosmosQA \ + --output_dir ${TARGET_DIR}/CosmosQA +fi + +# DREAM +if [ ! -d "${TARGET_DIR}/DREAM" ]; then + echo "Converting DREAM to unified format" + python preprocess/DREAM.py \ + --input_dir ../../data/origin/DREAM \ + --output_dir ${TARGET_DIR}/DREAM +fi + +# MuTual +if [ ! -d "${TARGET_DIR}/MuTual" ]; then + echo "Converting MuTual to unified format" + python preprocess/MuTual.py \ + --input_dir ../../data/origin/MuTual \ + --output_dir ${TARGET_DIR}/MuTual +fi + +# RACE +if [ ! -d "${TARGET_DIR}/RACE" ]; then + echo "Converting RACE to unified format" + python preprocess/RACE.py \ + --input_dir ../../data/origin/RACE \ + --output_dir ${TARGET_DIR}/RACE +fi + +# PCMD +if [ ! -d "${TARGET_DIR}/PCMD" ]; then + echo "Converting PCMD to unified format" + python preprocess/PCMD.py \ + --input_dir ../../data/origin/PCMD \ + --output_dir ${TARGET_DIR}/PCMD +fi + +# SocialIQA +if [ ! -d "${TARGET_DIR}/SocialIQA" ]; then + echo "Converting SocialIQA to unified format" + python preprocess/SocialIQA.py \ + --input_dir ../../data/origin/SocialIQA \ + --output_dir ${TARGET_DIR}/SocialIQA +fi + +# DialogSum +if [ ! -d "${TARGET_DIR}/DialogSum" ]; then + echo "Converting DialogSum to unified format" + python preprocess/DialogSum.py \ + --input_dir ../../data/origin/DialogSum \ + --output_dir ${TARGET_DIR}/DialogSum +fi + +# SAMSum +if [ ! -d "${TARGET_DIR}/SAMSum" ]; then + echo "Converting SAMSum to unified format" + python preprocess/SAMSum.py \ + --input_dir ../../data/origin/SAMSum \ + --output_dir ${TARGET_DIR}/SAMSum +fi + +# CMUDoG +if [ ! -d "${TARGET_DIR}/CMUDoG" ]; then + echo "Converting CMUDoG to unified format" + python preprocess/CMUDoG.py \ + --input_dir ../../data/origin/CMUDoG \ + --output_dir ${TARGET_DIR}/CMUDoG +fi + +# CommonsenseDialog +if [ ! -d "${TARGET_DIR}/CommonsenseDialog" ]; then + echo "Converting CommonsenseDialog to unified format" + python preprocess/CommonsenseDialog.py \ + --input_dir ../../data/origin/CommonsenseDialog \ + --output_dir ${TARGET_DIR}/CommonsenseDialog +fi + +# EmpathicDialogue +if [ ! 
-d "${TARGET_DIR}/EmpathicDialogue" ]; then + echo "Converting EmpathicDialogue to unified format" + python preprocess/EmpathicDialogue.py \ + --input_dir ../../data/origin/EmpathicDialogue \ + --output_dir ${TARGET_DIR}/EmpathicDialogue +fi + +# NarrativeQA +if [ ! -d "${TARGET_DIR}/NarrativeQA" ]; then + echo "Converting NarrativeQA to unified format" + python preprocess/NarrativeQA.py \ + --input_dir ../../data/origin/NarrativeQA \ + --output_dir ${TARGET_DIR}/NarrativeQA +fi + +# Soccer +if [ ! -d "${TARGET_DIR}/Soccer" ]; then + echo "Converting Soccer to unified format" + python preprocess/Soccer.py \ + --input_dir ../../data/origin/Soccer \ + --output_dir ${TARGET_DIR}/Soccer +fi + +# Incar +if [ ! -d "${TARGET_DIR}/Incar" ]; then + echo "Converting Incar to unified format" + python preprocess/Incar.py \ + --input_dir ../../data/origin/Incar \ + --output_dir ${TARGET_DIR}/Incar +fi + +# CornellMovie +if [ ! -d "${TARGET_DIR}/CornellMovie" ]; then + echo "Converting CornellMovie to unified format" + python preprocess/CornellMovie.py \ + --input_dir ../../data/origin/CornellMovie \ + --output_dir ${TARGET_DIR}/CornellMovie +fi + +# DailyDialog +if [ ! -d "${TARGET_DIR}/DailyDialog" ]; then + echo "Converting DailyDialog to unified format" + python preprocess/DailyDialog.py \ + --input_dir ../../data/origin/DailyDialog \ + --output_dir ${TARGET_DIR}/DailyDialog +fi + +# EmoryNLP +if [ ! -d "${TARGET_DIR}/EmoryNLP" ]; then + echo "Converting EmoryNLP to unified format" + python preprocess/EmoryNLP.py \ + --input_dir ../../data/origin/EmoryNLP \ + --output_dir ${TARGET_DIR}/EmoryNLP +fi + +# GoEmotions +if [ ! -d "${TARGET_DIR}/GoEmotions" ]; then + echo "Converting GoEmotions to unified format" + python preprocess/GoEmotions.py \ + --input_dir ../../data/origin/GoEmotions \ + --output_dir ${TARGET_DIR}/GoEmotions +fi + +# MELD +if [ ! -d "${TARGET_DIR}/MELD" ]; then + echo "Converting MELD to unified format" + python preprocess/MELD.py \ + --input_dir ../../data/origin/MELD \ + --output_dir ${TARGET_DIR}/MELD +fi + +# IEMOCAP +if [ ! -d "${TARGET_DIR}/IEMOCAP" ]; then + echo "Converting IEMOCAP to unified format" + python preprocess/IEMOCAP.py \ + --input_dir ../../data/origin/IEMOCAP \ + --output_dir ${TARGET_DIR}/IEMOCAP +fi + +# Banking77 +if [ ! -d "${TARGET_DIR}/Banking77" ]; then + echo "Converting Banking77 to unified format" + python preprocess/Banking77.py \ + --input_dir ../../data/origin/Banking77 \ + --output_dir ${TARGET_DIR}/Banking77 +fi + +# CLINC150 +if [ ! -d "${TARGET_DIR}/CLINC150" ]; then + echo "Converting CLINC150 to unified format" + python preprocess/CLINC150.py \ + --input_dir ../../data/origin/CLINC150 \ + --output_dir ${TARGET_DIR}/CLINC150 +fi + +# HWU64 +if [ ! -d "${TARGET_DIR}/HWU64" ]; then + echo "Converting HWU64 to unified format" + python preprocess/HWU64.py \ + --input_dir ../../data/origin/HWU64 \ + --output_dir ${TARGET_DIR}/HWU64 +fi + +# E2E +if [ ! -d "${TARGET_DIR}/E2E" ]; then + echo "Converting E2E to unified format" + python preprocess/E2E.py \ + --input_dir ../../data/origin/E2E \ + --output_dir ${TARGET_DIR}/E2E +fi + +# RNNLG +if [ ! -d "${TARGET_DIR}/RNNLG" ]; then + echo "Converting RNNLG to unified format" + python preprocess/RNNLG.py \ + --input_dir ../../data/origin/RNNLG \ + --output_dir ${TARGET_DIR}/RNNLG +fi + +# PERSONA-CHAT +if [ ! 
-d "${TARGET_DIR}/PERSONA-CHAT" ]; then + echo "Converting PERSONA-CHAT to unified format" + python preprocess/PERSONA-CHAT.py \ + --input_dir ../../data/origin/PERSONA-CHAT \ + --output_dir ${TARGET_DIR}/PERSONA-CHAT +fi + +# ENLP +if [ ! -d "${TARGET_DIR}/ENLP" ]; then + echo "Converting ENLP to unified format" + python preprocess/ENLP.py \ + --input_dir ../../data/origin/ENLP \ + --output_dir ${TARGET_DIR}/ENLP +fi + +# ASTE +if [ ! -d "${TARGET_DIR}/ASTE" ]; then + echo "Converting ASTE to unified format" + python preprocess/ASTE.py \ + --input_dir ../../data/origin/ASTE \ + --output_dir ${TARGET_DIR}/ASTE +fi + +# MAMS-ACSA +if [ ! -d "${TARGET_DIR}/MAMS-ACSA" ]; then + echo "Converting MAMS-ACSA to unified format" + python preprocess/MAMS-ACSA.py \ + --input_dir ../../data/origin/MAMS/MAMS-ACSA/raw \ + --output_dir ${TARGET_DIR}/MAMS-ACSA +fi + +# MAMS-ATSA +if [ ! -d "${TARGET_DIR}/MAMS-ATSA" ]; then + echo "Converting MAMS-ATSA to unified format" + python preprocess/MAMS-ATSA.py \ + --input_dir ../../data/origin/MAMS/MAMS-ATSA/raw \ + --output_dir ${TARGET_DIR}/MAMS-ATSA +fi + +# Twitter +if [ ! -d "${TARGET_DIR}/Twitter" ]; then + echo "Converting Twitter to unified format" + python preprocess/Twitter.py \ + --input_dir ../../data/origin/Twitter \ + --output_dir ${TARGET_DIR}/Twitter +fi \ No newline at end of file diff --git a/src/scripts/convert_to_unified_dialoglue.sh b/src/scripts/convert_to_unified_dialoglue.sh new file mode 100644 index 0000000000000000000000000000000000000000..2154fd8d2b5c3b3b8ac8c2be6af2764a99f31d04 --- /dev/null +++ b/src/scripts/convert_to_unified_dialoglue.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +TARGET_DIR=../../data/unified_dialoglue + +if [ ! -d "${TARGET_DIR}" ]; then + mkdir -p ${TARGET_DIR} +fi + + +# Banking77 +if [ ! -d "${TARGET_DIR}/Banking77" ]; then + echo "Converting Banking77 to unified format" + python preprocess/Banking77.py \ + --input_dir ../../data/origin/dialoglue/banking \ + --output_dir ${TARGET_DIR}/Banking77 +fi + +# CLINC150 +if [ ! -d "${TARGET_DIR}/CLINC150" ]; then + echo "Converting CLINC150 to unified format" + python preprocess/Banking77.py \ + --input_dir ../../data/origin/dialoglue/clinc \ + --output_dir ${TARGET_DIR}/CLINC150 +fi + +# HWU64 +if [ ! -d "${TARGET_DIR}/HWU64" ]; then + echo "Converting HWU64 to unified format" + python preprocess/Banking77.py \ + --input_dir ../../data/origin/dialoglue/hwu \ + --output_dir ${TARGET_DIR}/HWU64 +fi + +# Restaurant8k +if [ ! -d "${TARGET_DIR}/Restaurant8k" ]; then + echo "Converting Restaurant8k to unified format" + python preprocess/Restaurant8k.py \ + --input_dir ../../data/origin/dialoglue/restaurant8k \ + --output_dir ${TARGET_DIR}/Restaurant8k +fi + +# DSTC 8 +if [ ! -d "${TARGET_DIR}/DSTC8" ]; then + echo "Converting DSTC8 to unified format" + python preprocess/DSTC8.py \ + --input_dir ../../data/origin/dialoglue/dstc8 \ + --output_dir ${TARGET_DIR}/DSTC8 +fi + +# TOP +if [ ! -d "${TARGET_DIR}/TOP" ]; then + echo "Converting TOP to unified format" + python preprocess/TOP.py \ + --input_dir ../../data/origin/dialoglue/top \ + --output_dir ${TARGET_DIR}/TOP +fi \ No newline at end of file diff --git a/src/scripts/download.sh b/src/scripts/download.sh new file mode 100644 index 0000000000000000000000000000000000000000..1cc77e7df89c403a6b37c07d2093c181b65a056b --- /dev/null +++ b/src/scripts/download.sh @@ -0,0 +1,314 @@ +#!/bin/bash + +# set +ex + +TARGET_DIR=../../data/origin + +if [ ! 
-d "${TARGET_DIR}" ]; then + mkdir -p ${TARGET_DIR} + mkdir -p ${TARGET_DIR}/tmp +fi + +# ---------------------------- download all data ---------------------------- # +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/matthen/dstc/releases/download/v1/dstc2_traindev.tar.gz +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/matthen/dstc/releases/download/v1/dstc2_test.tar.gz +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/google-research-datasets/simulated-dialogue/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/budzianowski/multiwoz/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/google-research-datasets/dstc8-schema-guided-dialogue/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://docs.google.com/uc\?export\=download\&confirm\=t\&id\=1403EGqzIDoHMdQF4c9Bkyl7dZLZ5Wt6J -o spider.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://docs.google.com/uc\?export\=download\&confirm\=t\&id\=1Uu7NMHTR1tdQw1t7bAuM7OPU4LElVKfg -o sparc.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://docs.google.com/uc\?export\=download\&confirm\=t\&id\=1Y3ydpFiQQ3FC0bzdfy3groV95O_f1nXF -o cosql_dataset.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/awslabs/multi-domain-goal-oriented-dialogues-dataset/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/PolyAI-LDN/task-specific-datasets/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/sonos/nlu-benchmark/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://docs.google.com/uc\?export\=download\&confirm\=t\&id\=149M_pMTcQwZE_a-DI4YTckeFfHps-Y2T -o ${TARGET_DIR}/ddrel.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/terryqj0107/GECOR/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://obj.umiacs.umd.edu/elgohary/CANARD_Release.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://nlp.stanford.edu/data/coqa/coqa-train-v1.0.json +aria2c -c -x 4 -d ${TARGET_DIR} https://nlp.stanford.edu/data/coqa/coqa-dev-v1.0.json +aria2c -c -x 4 -d ${TARGET_DIR} http://ixa2.si.ehu.es/convai/doqa-v2.1.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/emorynlp/FriendsQA/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/HIT-SCIR/Molweni/archive/refs/heads/main.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://s3.amazonaws.com/my89public/quac/train_v0.2.json +aria2c -c -x 4 -d ${TARGET_DIR} https://s3.amazonaws.com/my89public/quac/val_v0.2.json +aria2c -c -x 4 -d ${TARGET_DIR} https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json +aria2c -c -x 4 -d ${TARGET_DIR} https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json +aria2c -c -x 4 -d ${TARGET_DIR} https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl +aria2c -c -x 4 -d ${TARGET_DIR} https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/allenai/csqa2/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://storage.googleapis.com/ai2-mosaic/public/cosmosqa/cosmosqa-data.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/nlpdata/dream/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/Nealcly/MuTual/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} http://www.cs.cmu.edu/~glai1/data/race/RACE.tar.gz +aria2c -c -x 4 -d ${TARGET_DIR} 
https://github.com/emorynlp/reading-comprehension/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://storage.googleapis.com/ai2-mosaic/public/socialiqa/socialiqa-train-dev.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/cylnlp/dialogsum/archive/refs/heads/main.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://arxiv.org/src/1911.12237v2/anc/corpus.7z +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/festvox/datasets-CMU_DoG/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/alexa/Commonsense-Dialogues/archive/refs/heads/main.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://dl.fbaipublicfiles.com/parlai/empatheticdialogues/empatheticdialogues.tar.gz +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/deepmind/narrativeqa/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/SmartDataAnalytics/KG-Copy_Network/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} http://zissou.infosci.cornell.edu/convokit/datasets/movie-corpus/movie-corpus.zip +wget -P ${TARGET_DIR} http://yanran.li/files/ijcnlp_dailydialog.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/caskcsg/SPCL/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/dinobby/HypEmo/archive/refs/heads/main.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/clinc/oos-eval/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/xliuhw/NLU-Evaluation-Data/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/tuetschek/e2e-dataset/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/shawnwun/RNNLG/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} http://parl.ai/downloads/convai2/convai2_fix_723.tgz +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/emorynlp/character-identification/archive/character-identification-2.0.tar.gz +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/ZubinGou/multi-view-prompting/archive/refs/heads/main.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://github.com/siat-nlp/MAMS-for-ABSA/archive/refs/heads/master.zip +aria2c -c -x 4 -d ${TARGET_DIR} https://docs.google.com/uc\?export\=download\&confirm\=t\&id\=0B8yp1gOBCztyVVVoLTdNZ1JHYVU -o twitter.zip +# ---------------------------------------------------------------------------- # + +function untar { + if [ ! -d "$2" ]; then + echo "Untar $3" + mkdir -p $2 && tar -xvf $1 -C $2 + else + echo "Skip $3" + fi +} + +function Unzip { + if [ ! 
-d "$2" ]; then + echo "Unzip $3" + mkdir -p $2 && unzip $1 -d $2 + else + echo "Skip $3" + fi +} + +# ------------------------------------------ DST TASK ------------------------------------------ # +# DSTC 2 +# train && dev +untar ${TARGET_DIR}/dstc2_traindev.tar.gz ${TARGET_DIR}/DSTC2/traindev DSTC2-train-dev +untar ${TARGET_DIR}/dstc2_test.tar.gz ${TARGET_DIR}/DSTC2/test DSTC2-test + +# sim-M & sim-R +Unzip ${TARGET_DIR}/simulated-dialogue-master.zip ${TARGET_DIR}/tmp/sim "sim-M&sim-R" && \ +mv ${TARGET_DIR}/tmp/sim/simulated-dialogue-master/sim-R ${TARGET_DIR}/sim-R && \ +mv ${TARGET_DIR}/tmp/sim/simulated-dialogue-master/sim-M ${TARGET_DIR}/sim-M + +# MultiWOZ 2.1 & 2.2 +Unzip ${TARGET_DIR}/multiwoz-master.zip ${TARGET_DIR}/tmp/multiwoz "MultiWOZ 2.2" && \ +mv ${TARGET_DIR}/tmp/multiwoz/multiwoz-master/data/MultiWOZ_2.2 ${TARGET_DIR}/MultiWOZ_2.2 && \ +Unzip ${TARGET_DIR}/tmp/multiwoz/multiwoz-master/data/MultiWOZ_2.1.zip ${TARGET_DIR}/tmp/multiwoz_2.1 "MultiWOZ 2.1" && \ +mv ${TARGET_DIR}/tmp/multiwoz_2.1/MultiWOZ_2.1 ${TARGET_DIR}/MultiWOZ_2.1 + +# SGD +Unzip ${TARGET_DIR}/dstc8-schema-guided-dialogue-master.zip ${TARGET_DIR}/tmp/sgd "SGD" && \ +mkdir -p ${TARGET_DIR}/SGD && \ +mv ${TARGET_DIR}/tmp/sgd/dstc8-schema-guided-dialogue-master/train ${TARGET_DIR}/SGD && \ +mv ${TARGET_DIR}/tmp/sgd/dstc8-schema-guided-dialogue-master/dev ${TARGET_DIR}/SGD && \ +mv ${TARGET_DIR}/tmp/sgd/dstc8-schema-guided-dialogue-master/test ${TARGET_DIR}/SGD + +# ------------------------------------------ T2S TASK ------------------------------------------ # +# Spider +Unzip ${TARGET_DIR}/spider.zip ${TARGET_DIR}/Spider "Spider" + +# SParC +Unzip ${TARGET_DIR}/sparc.zip ${TARGET_DIR}/SParC "SParC" + +# CoSQL +Unzip ${TARGET_DIR}/cosql_dataset.zip ${TARGET_DIR}/CoSQL "CoSQL" + +# ------------------------------------------ SF TASK ------------------------------------------ # +# MultiDoGo +Unzip ${TARGET_DIR}/multi-domain-goal-oriented-dialogues-dataset-master.zip ${TARGET_DIR}/tmp/multidogo "MultiDoGo" && \ +mv ${TARGET_DIR}/tmp/multidogo/multi-domain-goal-oriented-dialogues-dataset-master/data/paper_splits/splits_annotated_at_turn_level ${TARGET_DIR}/MultiDoGo + +# Restaurant8k +Unzip ${TARGET_DIR}/task-specific-datasets-master.zip ${TARGET_DIR}/tmp/Restaurant8k "Restaurant8k" && \ +mv ${TARGET_DIR}/tmp/Restaurant8k/task-specific-datasets-master/span_extraction/restaurant8k ${TARGET_DIR}/Restaurant8k + +# SNIPS +Unzip ${TARGET_DIR}/nlu-benchmark-master.zip ${TARGET_DIR}/tmp/snips "SNIPS" && \ +mv ${TARGET_DIR}/tmp/snips/nlu-benchmark-master/2017-06-custom-intent-engines ${TARGET_DIR}/SNIPS + +# ------------------------------------------ RRR TASK ------------------------------------------ # +# DDRel +Unzip ${TARGET_DIR}/ddrel.zip ${TARGET_DIR}/tmp/ddrel "DDRel" && \ +mv ${TARGET_DIR}/tmp/ddrel/ddrel ${TARGET_DIR}/DDRel + +# ------------------------------------------ QCR TASK ------------------------------------------ # +# CamRest676 +Unzip ${TARGET_DIR}/GECOR-master.zip ${TARGET_DIR}/tmp/camrest676 "CamRest676" && \ +mv ${TARGET_DIR}/tmp/camrest676/GECOR-master/CamRest676_for_coreference_and_ellipsis_resolution ${TARGET_DIR}/CamRest676 + +# CANARD +Unzip ${TARGET_DIR}/CANARD_Release.zip ${TARGET_DIR}/tmp/CANARD "CANARD" && \ +mv ${TARGET_DIR}/tmp/CANARD/CANARD_Release ${TARGET_DIR}/CANARD + +# ------------------------------------------ NLI TASK ------------------------------------------ # +Unzip ${TARGET_DIR}/alphanli-train-dev.zip ${TARGET_DIR}/AlphaNLI + +# ------------------------------------------ MRC 
TASK ------------------------------------------ # +# CoQA +mkdir -p ${TARGET_DIR}/CoQA && \ +mv ${TARGET_DIR}/coqa-train-v1.0.json ${TARGET_DIR}/CoQA && \ +mv ${TARGET_DIR}/coqa-dev-v1.0.json ${TARGET_DIR}/CoQA + +# DoQA +Unzip ${TARGET_DIR}/doqa-v2.1.zip ${TARGET_DIR}/tmp/doqa "DoQA" && \ +mv ${TARGET_DIR}/tmp/doqa/doqa-v2.1/doqa_dataset ${TARGET_DIR}/DoQA + +# FriendsQA +Unzip ${TARGET_DIR}/FriendsQA-master.zip ${TARGET_DIR}/tmp/friendsqa "FriendsQA" && \ +mv ${TARGET_DIR}/tmp/friendsqa/FriendsQA-master/dat ${TARGET_DIR}/FriendsQA + +# Molweni +Unzip ${TARGET_DIR}/Molweni-main.zip ${TARGET_DIR}/tmp/molweni "Molweni" && \ +mv "${TARGET_DIR}/tmp/molweni/Molweni-main/MRC(withDiscourse)" ${TARGET_DIR}/Molweni + +# QuAC +mkdir -p ${TARGET_DIR}/QuAC && \ +mv ${TARGET_DIR}/train_v0.2.json ${TARGET_DIR}/QuAC && \ +mv ${TARGET_DIR}/val_v0.2.json ${TARGET_DIR}/QuAC + +# SQuAD 2.0 +mkdir -p ${TARGET_DIR}/SQuAD_2.0 && \ +mv ${TARGET_DIR}/train-v2.0.json ${TARGET_DIR}/SQuAD_2.0 && \ +mv ${TARGET_DIR}/dev-v2.0.json ${TARGET_DIR}/SQuAD_2.0 + +# ---------------------------------------- MCQA TASK ---------------------------------------- # +# CommonsenseQA +mkdir -p ${TARGET_DIR}/CommonsenseQA && \ +mv ${TARGET_DIR}/train_rand_split.jsonl ${TARGET_DIR}/CommonsenseQA && \ +mv ${TARGET_DIR}/dev_rand_split.jsonl ${TARGET_DIR}/CommonsenseQA + +# CommonsenseQA 2.0 +Unzip ${TARGET_DIR}/csqa2-master.zip ${TARGET_DIR}/tmp/csqa2 "CommonsenseQA 2.0" && \ +mkdir -p ${TARGET_DIR}/CommonsenseQA_2.0 && \ +gunzip ${TARGET_DIR}/tmp/csqa2/csqa2-master/dataset/CSQA2_train.json.gz && \ +gunzip ${TARGET_DIR}/tmp/csqa2/csqa2-master/dataset/CSQA2_dev.json.gz && +mv ${TARGET_DIR}/tmp/csqa2/csqa2-master/dataset/*.json ${TARGET_DIR}/CommonsenseQA_2.0 + +# CosmosQA +Unzip ${TARGET_DIR}/cosmosqa-data.zip ${TARGET_DIR}/tmp/cosmosqa "CosmosQA" && \ +mv ${TARGET_DIR}/tmp/cosmosqa ${TARGET_DIR}/CosmosQA + +# DREAM +Unzip ${TARGET_DIR}/dream-master.zip ${TARGET_DIR}/tmp/dream "DREAM" && \ +mv ${TARGET_DIR}/tmp/dream/dream-master/data ${TARGET_DIR}/DREAM + +# MuTual +Unzip ${TARGET_DIR}/MuTual-master.zip ${TARGET_DIR}/tmp/mutual "MuTual" && \ +mv ${TARGET_DIR}/tmp/mutual/MuTual-master/data ${TARGET_DIR}/MuTual + +# RACE +untar ${TARGET_DIR}/RACE.tar.gz ${TARGET_DIR}/tmp/RACE "RACE" && \ +mv ${TARGET_DIR}/tmp/RACE/RACE ${TARGET_DIR}/RACE + +# PCMD +Unzip ${TARGET_DIR}/reading-comprehension-master.zip ${TARGET_DIR}/tmp/pcmd "PCMD" && \ +mv ${TARGET_DIR}/tmp/pcmd/reading-comprehension-master/json ${TARGET_DIR}/PCMD + +# SocialIQA +Unzip ${TARGET_DIR}/socialiqa-train-dev.zip ${TARGET_DIR}/tmp/socialiqa "SocialIQA" && \ +mv ${TARGET_DIR}/tmp/socialiqa/socialiqa-train-dev ${TARGET_DIR}/SocialIQA + +# --------------------------------------- DS TASK --------------------------------------- # +# DialogSum +Unzip ${TARGET_DIR}/dialogsum-main.zip ${TARGET_DIR}/tmp/dialogsum "DialogSum" && \ +mv ${TARGET_DIR}/tmp/dialogsum/dialogsum-main/DialogSum_Data ${TARGET_DIR}/DialogSum + +# SAMSum +7z x ${TARGET_DIR}/corpus.7z -o${TARGET_DIR}/SAMSum + +# ----------------------------------------- DCRG TASK ----------------------------------------- # +# CMUDoG +Unzip ${TARGET_DIR}/datasets-CMU_DoG-master.zip ${TARGET_DIR}/tmp/cmdog "CMUDoG" && \ +mv ${TARGET_DIR}/tmp/cmdog/datasets-CMU_DoG-master ${TARGET_DIR}/CMUDoG + +# CommonsenseDialog +Unzip ${TARGET_DIR}/Commonsense-Dialogues-main.zip ${TARGET_DIR}/tmp/csdialog "CommonsenseDialog" && \ +mv ${TARGET_DIR}/tmp/csdialog/Commonsense-Dialogues-main/data ${TARGET_DIR}/CommonsenseDialog + +# EmpathicDialogue 
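+# (handled by the untar helper defined above, which skips extraction when the target directory already exists)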
+untar ${TARGET_DIR}/empatheticdialogues.tar.gz ${TARGET_DIR}/tmp/empatheticdialogues "EmpathicDialogue" && \ +mv ${TARGET_DIR}/tmp/empatheticdialogues/empatheticdialogues ${TARGET_DIR}/EmpathicDialogue + +# NarrativeQA +Unzip ${TARGET_DIR}/narrativeqa-master.zip ${TARGET_DIR}/tmp/narrativeqa "NarrativeQA" && \ +mkdir -p ${TARGET_DIR}/NarrativeQA && \ +mv ${TARGET_DIR}/tmp/narrativeqa/narrativeqa-master/third_party ${TARGET_DIR}/NarrativeQA && \ +mv ${TARGET_DIR}/tmp/narrativeqa/narrativeqa-master/qaps.csv ${TARGET_DIR}/NarrativeQA + +# Soccer & Incar +Unzip ${TARGET_DIR}/KG-Copy_Network-master.zip ${TARGET_DIR}/tmp/kgcn "Soccer&Incar" && \ +mkdir -p ${TARGET_DIR}/Incar && \ +mv ${TARGET_DIR}/tmp/kgcn/KG-Copy_Network-master/data/KVR/*incar.txt ${TARGET_DIR}/Incar && \ +mv ${TARGET_DIR}/tmp/kgcn/KG-Copy_Network-master/data/KVR ${TARGET_DIR}/Soccer + +# CornellMovie +Unzip ${TARGET_DIR}/movie-corpus.zip ${TARGET_DIR}/tmp/cornellmovie "CornellMovie" && \ +mv ${TARGET_DIR}/tmp/cornellmovie/movie-corpus ${TARGET_DIR}/CornellMovie + +# ----------------------------------------- ER TASK ----------------------------------------- # +# DailyDialog +Unzip ${TARGET_DIR}/ijcnlp_dailydialog.zip ${TARGET_DIR}/tmp/dailydialog "DailyDialog" && \ +unzip ${TARGET_DIR}/tmp/dailydialog/ijcnlp_dailydialog/train.zip -d ${TARGET_DIR}/tmp/dailydialog/ijcnlp_dailydialog && \ +unzip ${TARGET_DIR}/tmp/dailydialog/ijcnlp_dailydialog/validation.zip -d ${TARGET_DIR}/tmp/dailydialog/ijcnlp_dailydialog && \ +unzip ${TARGET_DIR}/tmp/dailydialog/ijcnlp_dailydialog/test.zip -d ${TARGET_DIR}/tmp/dailydialog/ijcnlp_dailydialog && \ +mv ${TARGET_DIR}/tmp/dailydialog/ijcnlp_dailydialog ${TARGET_DIR}/DailyDialog + +# EmoryNLP, MELD, IEMOCAP +Unzip ${TARGET_DIR}/SPCL-master.zip ${TARGET_DIR}/tmp/spcl "EmoryNLP, MELD, IEMOCAP" && \ +mv ${TARGET_DIR}/tmp/spcl/SPCL-master/emorynlp ${TARGET_DIR}/EmoryNLP && \ +mv ${TARGET_DIR}/tmp/spcl/SPCL-master/MELD ${TARGET_DIR}/MELD && \ +mv ${TARGET_DIR}/tmp/spcl/SPCL-master/IEMOCAP ${TARGET_DIR}/IEMOCAP + +# GoEmotions +Unzip ${TARGET_DIR}/HypEmo-main.zip ${TARGET_DIR}/tmp/goemotions "GoEmotions" && \ +mv ${TARGET_DIR}/tmp/goemotions/HypEmo-main/data/go_emotion ${TARGET_DIR}/GoEmotions + +# ----------------------------------------- ID TASK ----------------------------------------- # +# Banking77 +Unzip ${TARGET_DIR}/task-specific-datasets-master.zip ${TARGET_DIR}/tmp/banking77 "Banking77" && \ +mv ${TARGET_DIR}/tmp/banking77/task-specific-datasets-master/banking_data ${TARGET_DIR}/Banking77 + +# CLINC150 +Unzip ${TARGET_DIR}/oos-eval-master.zip ${TARGET_DIR}/tmp/clinc150 "CLINC150" && \ +mv ${TARGET_DIR}/tmp/clinc150/oos-eval-master/data ${TARGET_DIR}/CLINC150 + +# HWU64 +Unzip ${TARGET_DIR}/NLU-Evaluation-Data-master.zip ${TARGET_DIR}/tmp/hwu64 "HWU64" && \ +mv ${TARGET_DIR}/tmp/hwu64/NLU-Evaluation-Data-master/CrossValidation/autoGeneFromRealAnno/autoGene_2018_03_22-13_01_25_169/CrossValidation/KFold_1 ${TARGET_DIR}/HWU64 + +# ----------------------------------------- DT TASK ----------------------------------------- # +# E2E +Unzip ${TARGET_DIR}/e2e-dataset-master.zip ${TARGET_DIR}/tmp/e2e "E2E" && \ +mv ${TARGET_DIR}/tmp/e2e/e2e-dataset-master ${TARGET_DIR}/E2E + +# RNNLG +Unzip ${TARGET_DIR}/RNNLG-master.zip ${TARGET_DIR}/tmp/rnnlg "RNNLG" && \ +mv ${TARGET_DIR}/tmp/rnnlg/RNNLG-master/data/original ${TARGET_DIR}/RNNLG + +# ----------------------------------------- CC TASK ----------------------------------------- # +# PERSONA-CHAT +untar ${TARGET_DIR}/convai2_fix_723.tgz 
${TARGET_DIR}/tmp/persona-chat "PERSONA-CHAT" && \ +mv ${TARGET_DIR}/tmp/persona-chat ${TARGET_DIR}/PERSONA-CHAT + +# ----------------------------------------- CI TASK ----------------------------------------- # +untar ${TARGET_DIR}/character-identification-character-identification-2.0.tar.gz ${TARGET_DIR}/tmp/ENLP "ENLP" && \ +mv ${TARGET_DIR}/tmp/ENLP/character-identification-character-identification-2.0/json ${TARGET_DIR}/ENLP + +# ----------------------------------------- ABSA TASK ----------------------------------------- # +# ASTE +Unzip ${TARGET_DIR}/multi-view-prompting-main.zip ${TARGET_DIR}/tmp/aste "ASTE" && \ +mv ${TARGET_DIR}/tmp/aste/multi-view-prompting-main/data/aste ${TARGET_DIR}/ASTE + +# MAMS +Unzip ${TARGET_DIR}/MAMS-for-ABSA-master.zip ${TARGET_DIR}/tmp/mams "MAMS" && \ +mv ${TARGET_DIR}/tmp/mams/MAMS-for-ABSA-master/data ${TARGET_DIR}/MAMS + +# Twitter +Unzip ${TARGET_DIR}/twitter.zip ${TARGET_DIR}/tmp/twitter "Twitter" && \ +mv ${TARGET_DIR}/tmp/twitter ${TARGET_DIR}/Twitter + +rm -rf ${TARGET_DIR}/tmp \ No newline at end of file diff --git a/src/scripts/statistics.sh b/src/scripts/statistics.sh new file mode 100644 index 0000000000000000000000000000000000000000..68d350659adc2bb2b8a0b01b660d10f92bfc9bdc --- /dev/null +++ b/src/scripts/statistics.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +BASE_DIR=../../data/seq_truncated +TASKS=("DST" "T2S" "SF" "RRR" "QCR" "NLI" "MRC" "MCQA" "DS" "DCRG" "ER" "ID" "DT" "CC" "CI" "ABSA") +# TASKS=("DT") +TOKENIZER_PATH="../../ckpts/t5-base" +OUTPUT_DIR=../../data + +DATASETS=() + +for TASK in ${TASKS[*]}; do + for dataset in `ls ${BASE_DIR} | grep "^${TASK}-"`; do + DATASETS+=(${BASE_DIR}/${dataset}) + done +done + +python get_statistics.py \ + --input-dir-list ${DATASETS[@]} \ + --tokenizer-path ${TOKENIZER_PATH} \ + --output-path ${OUTPUT_DIR} \ diff --git a/src/scripts/truncate.sh b/src/scripts/truncate.sh new file mode 100644 index 0000000000000000000000000000000000000000..75a801731970541069d72a8f33d00baeffa4eb79 --- /dev/null +++ b/src/scripts/truncate.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +YAML=../../hyperparameters/truncation.yaml + +TRUNCATE_TASKS=$(yq e '.TRUNCATE_TASKS | join("\n")' ${YAML}) +SOURCE_DIR=$(yq e '.SOURCE_DIR' ${YAML}) +TARGET_DIR=$(yq e '.TARGET_DIR' ${YAML}) +MAX_SRC_LEN=$(yq e '.MAX_SRC_LEN' ${YAML}) +TOKENIZER_PATH=$(yq e '.TOKENIZER_PATH' ${YAML}) + +if [ ! 
-d "${TARGET_DIR}" ]; then + mkdir -p ${TARGET_DIR} +fi + +KEEP_TASKS=$(yq e '.KEEP_TASKS | join("\n")' ${YAML}) +for TASK in ${KEEP_TASKS}; do + cp -r ${SOURCE_DIR}/${TASK}* ${TARGET_DIR} +done + + +for TASK in ${TRUNCATE_TASKS}; do + for dataset in `ls ${SOURCE_DIR} | grep ${TASK}-`; do + max_dial_len=`yq e ".MAX_DIAL_LEN.\"${dataset}\"" ${YAML}` + truncation_side=`yq e ".TRUNCATION_SIDE.\"${TASK}\"" ${YAML}` + + python -u truncate.py \ + --input-dir ${SOURCE_DIR}/${dataset} \ + --output-dir ${TARGET_DIR}/${dataset} \ + --max-src-length ${MAX_SRC_LEN} \ + --max-dialogue-history-len ${max_dial_len} \ + --tokenizer-path ${TOKENIZER_PATH} \ + --truncate-side ${truncation_side} + done +done + +# for TASK in ${TASKS[*]}; do + +# DIAL_TRUNCATION_SIDE="left" +# if [ ${TASK} = "RRR" ]; then +# DIAL_TRUNCATION_SIDE="right" +# fi + +# for dataset in `ls ${BASE_DIR} | grep ${TASK}`; do + +# # ---------- special cases ---------- # +# # truncation side +# # if [ ${dataset} = "QCR-CANARD" ] + +# python -u truncate.py \ +# --input-dir ${BASE_DIR}/${dataset} \ +# --output-dir ${TARGET_DIR}/${dataset} \ +# --max-src-length ${MAX_SRC_LEN} \ +# --max-dialogue-history-len ${max_dialogue_length[${TASK}]} \ +# --tokenizer-path ${TOKENIZER_PATH} \ +# --truncate-side ${DIAL_TRUNCATION_SIDE} +# done +# done diff --git a/src/seq2file.py b/src/seq2file.py new file mode 100644 index 0000000000000000000000000000000000000000..86c88a6695a12d7c1e835e2f529ed0e311a241f0 --- /dev/null +++ b/src/seq2file.py @@ -0,0 +1,38 @@ +import os +import json +import sys + + +def transform(): + input_dir, output_dir = sys.argv[1:] + if not os.path.exists(output_dir): + os.makedirs(output_dir) + for filename in os.listdir(input_dir): + filepath = os.path.join(input_dir, filename) + if os.path.isfile(filepath): + split = filename.split(".")[0] + + src_file = os.path.join(output_dir, f"{split}.src") + tgt_file = os.path.join( + output_dir, f"{split}.tgt" if split == "train" else f"{split}.gold" + ) + + with open(filepath, "r", encoding="UTF-8") as reader, open( + src_file, "w" + ) as src_writer, open(tgt_file, "w") as tgt_writer: + for line in reader: + if line.strip(): + example = json.loads(line.strip()) + src_writer.write(f"{example['src']}\n") + tgt_writer.write(f"{example['tgt']}") + + if "db_id" in example and split != "train": + tgt_writer.write(f"\t{example['db_id']}") + + tgt_writer.write("\n") + elif line: + tgt_writer.write("\n") + + +if __name__ == "__main__": + transform() diff --git a/src/serialization.sh b/src/serialization.sh new file mode 100644 index 0000000000000000000000000000000000000000..fa0a8aa1acbbb52da89591bfc1127092034df94f --- /dev/null +++ b/src/serialization.sh @@ -0,0 +1,18 @@ +#!/bin/bash + + +DATASET="MAMS-ATSA" +TASK="ABSA" +# python DST.py "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/universal/${DATASET}" "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq-dict/DST-GoogleSimulatedDialogue-Restaurant" +# python seq2file.py "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq-dict/DST-GoogleSimulatedDialogue-Restaurant" "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq/DST-GoogleSimulatedDialogue-Restaurant" + +# for split in `ls /mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/universal/${DATASET}`; do +# python ${TASK}.py "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/universal/${DATASET}/${split}" 
"/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq-dict/${TASK}-${DATASET}-${split}" +# python seq2file.py "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq-dict/${TASK}-${DATASET}-${split}" "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq/${TASK}-${DATASET}-${split}" +# done + +# python SF.py "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/universal/${DATASET}" "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq-dict/T2S-${DATASET}" +# python seq2file.py "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq-dict/T2S-${DATASET}" "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq/T2S-${DATASET}" + +python ${TASK}.py "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/universal/MAMS/${DATASET}" "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq-dict/${TASK}-${DATASET}" +python seq2file.py "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq-dict/${TASK}-${DATASET}" "/mnt/lustre/sjtu/home/dm311/workspace/Multi-Lingual-Dialog/data/MultiLingual/seq/${TASK}-${DATASET}" diff --git a/src/truncate.py b/src/truncate.py new file mode 100644 index 0000000000000000000000000000000000000000..8097f1ff93de6576fc0b58493093f6c36817f3c9 --- /dev/null +++ b/src/truncate.py @@ -0,0 +1,211 @@ +import argparse +import os +import shutil +from transformers import T5Tokenizer +from tqdm import tqdm + + +def parse(): + parser = argparse.ArgumentParser() + + parser.add_argument("--input-dir", type=str) + parser.add_argument("--output-dir", type=str) + parser.add_argument("--max-src-length", type=int, default=512) + parser.add_argument("--max-dialogue-history-len", type=int, default=256) + parser.add_argument("--tokenizer-path", type=str) + parser.add_argument("--special-tokens-file", type=str, default=None) + parser.add_argument( + "--truncate-side", type=str, default="left", choices=["left", "right"] + ) + + return parser.parse_args() + + +def truncate(args): + left_tokenizer = T5Tokenizer.from_pretrained( + args.tokenizer_path, truncate_side="left" + ) + right_tokenizer = T5Tokenizer.from_pretrained( + args.tokenizer_path, truncate_side="right" + ) + tokenizer = T5Tokenizer.from_pretrained(args.tokenizer_path) + + if args.special_tokens_file is not None: + with open(args.special_tokens_file, "r") as reader: + special_tokens_dict = { + "additional_special_tokens": [ + token.strip() for token in reader.readlines() + ] + } + + left_tokenizer.add_special_tokens(special_tokens_dict) + right_tokenizer.add_special_tokens(special_tokens_dict) + tokenizer.add_special_tokens(special_tokens_dict) + + def normalize(x): + return tokenizer.decode(tokenizer(x).input_ids[:-1]) + + def divide_chunks(src): + prefix, postfix = src.split("]", 1) + prefix = prefix + "]" + + knowledge_start_index = postfix.index("[EK]") + dialogue = postfix[: knowledge_start_index - 1] + knowledge_and_instruction = postfix[knowledge_start_index - 1 :] + + instruction_start_index = knowledge_and_instruction.rfind("[C]") + knowledge = knowledge_and_instruction[: instruction_start_index - 1] + instruction = knowledge_and_instruction[instruction_start_index - 1 :] + + return prefix, dialogue, knowledge, instruction + + def token_num(x): + return len(tokenizer.tokenize(x)) + + min_knowledge_len = token_num(" [EK] None") + + if not os.path.exists(args.output_dir): 
+ os.makedirs(args.output_dir) + + print(f" {os.path.basename(args.input_dir)} ".center(70, "=")) + + for filename in os.listdir(args.input_dir): + if not filename.endswith(".src"): + filepath = os.path.join(args.input_dir, filename) + if not os.path.exists(os.path.join(args.output_dir, filename)): + if os.path.isfile(filepath): + shutil.copyfile( + os.path.join(args.input_dir, filename), + os.path.join(args.output_dir, filename), + ) + else: + shutil.copytree( + os.path.join(args.input_dir, filename), + os.path.join(args.output_dir, filename), + ) + else: + dialogue_cut_num = 0 + knowledge_cut_num = 0 + cut_token_num = 0 + # Truncate the source file + with open(os.path.join(args.input_dir, filename), "r") as reader, open( + os.path.join(args.output_dir, filename), "w" + ) as writer: + for line in tqdm(reader.readlines()): + src = line.strip() + src = normalize(src) + + prefix, dialogue, knowledge, instruction = divide_chunks(src) + + prefix_token_num = token_num(prefix) + dialogue_token_num = token_num(dialogue) + knowledge_token_num = token_num(knowledge) + instruction_token_num = token_num(instruction) + + assert ( + args.max_src_length >= prefix_token_num + instruction_token_num + ) + + origin_src_token_num = ( + prefix_token_num + + dialogue_token_num + + knowledge_token_num + + instruction_token_num + ) + # origin_src_token_num = token_num(src) + + # assert ( + # prefix_token_num + # + dialogue_token_num + # + knowledge_token_num + # + instruction_token_num + # == origin_src_token_num + # ) + + if origin_src_token_num > args.max_src_length: + left_token_num = ( + args.max_src_length + - prefix_token_num + - instruction_token_num + ) + max_dialogue_token_num = min( + max( + args.max_dialogue_history_len, + left_token_num - knowledge_token_num, + ), + left_token_num - min_knowledge_len, + ) + + # The dialogue is out of the maximum token number + if dialogue_token_num > max_dialogue_token_num: + # Truncate the dialogue from left or right (For DDRel) + truncate_tokenizer = ( + left_tokenizer + if args.truncate_side == "left" + else right_tokenizer + ) + dialogue_ids = truncate_tokenizer( + dialogue, + max_length=max_dialogue_token_num + + 1, # +1 is for the eos + truncation=True, + ).input_ids + + dialogue = tokenizer.decode(dialogue_ids[:-1]) + dialogue_token_num = max_dialogue_token_num + dialogue_cut_num += 1 + + # assert token_num(dialogue) <= dialogue_token_num + + if knowledge_token_num > left_token_num - dialogue_token_num: + # Truncate the knowledge from right + knowledge_ids = right_tokenizer( + knowledge, + max_length=left_token_num - dialogue_token_num + 1, + truncation=True, + ).input_ids + + knowledge = tokenizer.decode(knowledge_ids[:-1]) + + knowledge = " " + knowledge + + knowledge_token_num = left_token_num - dialogue_token_num + knowledge_cut_num += 1 + + # assert ( + # token_num(knowledge) <= knowledge_token_num + # ), f"{knowledge_token_num}, {token_num(knowledge)}, {tokenizer.convert_ids_to_tokens(knowledge_ids)}, {knowledge_ids}" + + src = ( + prefix.strip() + + " " + + dialogue.strip() + + " " + + knowledge.strip() + + " " + + instruction.strip() + ) + + src_token_num = token_num(src) + + # assert src_token_num <= args.max_src_length + + cut_token_num += origin_src_token_num - src_token_num + + prefix, dialogue, knowledge, instruction = divide_chunks(src) + + prefix_token_num = token_num(prefix) + dialogue_token_num = token_num(dialogue) + knowledge_token_num = token_num(knowledge) + instruction_token_num = token_num(instruction) + + writer.write(src + "\n") + + 
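+ # Per-file truncation statistics: how many examples had their dialogue or knowledge
+ # segment shortened, and the total number of tokens removed from this split.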
print(f" {filename} ".center(40, "-")) + print(f"dialogue cut num: {dialogue_cut_num}") + print(f"knowledge cut num: {knowledge_cut_num}") + print(f"token cut num: {cut_token_num}") + + +if __name__ == "__main__": + truncate(parse())
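+
+# A minimal illustrative invocation (the paths below are placeholders taken from the
+# other scripts in this repo, not shipped defaults):
+#   python truncate.py \
+#       --input-dir ../../data/seq/DST-MultiWOZ_2.2 \
+#       --output-dir ../../data/seq_truncated/DST-MultiWOZ_2.2 \
+#       --max-src-length 512 --max-dialogue-history-len 256 \
+#       --tokenizer-path ../../ckpts/t5-base --truncate-side left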