# author: Martin Fajčík

import gzip
import os
import re
from typing import Dict
from tqdm import tqdm

FILE_PATH = ".data/ORTOFONv1/ortofon_v1_vert.gz"
with gzip.open(FILE_PATH, "rt") as f:
    data = f.read()

def process_vert_format_ortofon(vert_content: str) -> Dict[str, str]:
    # Pattern to match document boundaries and extract metadata
    doc_pattern = re.compile(r'<doc[^>]*>.*?</doc>', re.DOTALL)
    metadata_pattern = re.compile(
        r'<doc id="([^"]*)" year="([^"]*)" month="([^"]*)" location="([^"]*)" situation="([^"]*)" speakers="([^"]*)" genders="([^"]*)" generations="([^"]*)" relationship="([^"]*)"[^>]*>')

    # Pattern to match speaker turns
    sp_pattern = re.compile(r'<sp[^>]*nickname="([^"]*)"[^>]*>(.*?)</sp>', re.DOTALL)

    # Pattern to match pw tags
    pw_pattern = re.compile(r'<pw>\n(.*?)</pw>\n', re.DOTALL)

    # Pattern to remove speaker suffix
    remove_speaker_suffix = re.compile(r'_[0-9]+$')

    # Pattern to remove whitespace before punctuation
    ws_before_punct = re.compile(r'\s+([.!?])')

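    # Illustrative sketch of the vertical-format structure these patterns assume
    # (simplified, not copied from the corpus): each <doc> carries the attributes
    # matched above and contains <sp nickname="..."> turns whose <pw> blocks hold
    # one token per line, with the word form in the first tab-separated column:
    #
    #   <doc id="..." year="..." month="..." location="..." situation="..."
    #        speakers="..." genders="..." generations="..." relationship="...">
    #     <sp nickname="Marie_1">
    #       <pw>
    #       <word form><TAB>...
    #       </pw>
    #     </sp>
    #   </doc>
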
    # Find all documents
    documents = re.findall(doc_pattern, vert_content)
    processed_documents = {}

    for doc in tqdm(documents):
        # Extract metadata
        metadata_match = re.search(metadata_pattern, doc)
        if metadata_match:
            doc_id = metadata_match.group(1)
            location = metadata_match.group(4)
            situation = metadata_match.group(5)
            speakers = metadata_match.group(6)
            genders = metadata_match.group(7)
            generations = metadata_match.group(8)
            relationship = metadata_match.group(9)
            metadata_str = (f"Lokalita: {location}, Situace: {situation}, "
                            f"Počet mluvčích: {speakers}, Pohlaví: {genders}, "
                            f"Generace: {generations}, Vztah: {relationship}")
        else:
            raise ValueError("Metadata not found in document")

        # Initialize an empty list to hold processed document text
        processed_document = [metadata_str]

        # Find all speaker turns within the document
        for sp_match in re.findall(sp_pattern, doc):
            speaker_id = sp_match[0]
            # sometimes speaker_id ends with _1, _2, _89, etc. Remove it
            speaker_id = re.sub(remove_speaker_suffix, '', speaker_id)

            # speaker "Y" marks a non-speech sound; relabel it as "Zvuk" ("sound")
            if speaker_id == "Y":
                speaker_id = "Zvuk"

            sp_content = sp_match[1]

            segs = re.findall(pw_pattern, sp_content)
            if not segs:
                segs = [sp_content]
            # keep only the word form (first tab-separated column) of each line, then join
            tokens = [line.split("\t")[0].strip() for seg in segs for line in seg.split("\n") if line != ""]
            speaker_text = " ".join(tokens)

            # some ortofon turns contain only three dots, e.g. "[mluvčí: Miroslava] ..."; drop them
            if speaker_text.strip() == "...":
                continue
            # some ortofon turns contain only "@", e.g. "[mluvčí: Radka] @"; drop them
            if speaker_text.strip() == "@":
                continue

            # remove whitespace before ., !, ?
            speaker_text = re.sub(ws_before_punct, r'\1', speaker_text)

            # Format the speaker turn and add to the processed document list
            processed_document.append(f"[mluvčí: {speaker_id}] {speaker_text}")


        # Join all speaker turns into a single string for the document
        final_text = '\n'.join(processed_document)
        processed_documents[doc_id] = final_text

    return processed_documents


ortofon_data = process_vert_format_ortofon(data)
del data
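
# Optional sanity check (not required for the conversion): report how many
# ORTOFON documents were recovered.
print(f"ORTOFON: processed {len(ortofon_data)} documents")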


FILE_PATH = ".data/ORAL2013/oral2013_vert.gz"
with gzip.open(FILE_PATH, "rt") as f:
    data = f.read()

def process_vert_format_oral(vert_content: str) -> Dict[str, str]:
    # Pattern to match document boundaries and extract metadata
    doc_pattern = re.compile(r'<doc[^>]*>.*?</doc>', re.DOTALL)
    metadata_pattern = re.compile(
        r'<doc id="([^"]*)" temp="([^"]*)" pocet="([^"]*)" vztah="([^"]*)" situace="([^"]*)" promluva="([^"]*)"[^>]*>'
    )
    # Pattern to match speaker turns
    sp_pattern = re.compile(r'<sp[^>]*num="([^"]*)"[^>]*>(.*?)</sp>', re.DOTALL)

    # Pattern to match seg tags
    seg_pattern = re.compile(r'<seg start="[^"]*" end="[^"]*">(.*?)</seg>\n', re.DOTALL)

    # Pattern to remove whitespace before punctuation
    ws_before_punct = re.compile(r'\s+([.!?])')

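    # Illustrative sketch of the ORAL2013 structure these patterns assume
    # (simplified, not copied from the corpus): speaker turns are identified by
    # num="..." and their tokens live in <seg start="..." end="..."> blocks,
    # again with the word form in the first tab-separated column:
    #
    #   <doc id="..." temp="..." pocet="..." vztah="..." situace="..." promluva="...">
    #     <sp num="1">
    #       <seg start="0.0" end="1.2">
    #       <word form><TAB>...
    #       </seg>
    #     </sp>
    #   </doc>
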
    # Find all documents
    documents = re.findall(doc_pattern, vert_content)
    processed_documents = {}

    for doc in tqdm(documents):
        # Extract metadata
        metadata_match = re.search(metadata_pattern, doc)
        if metadata_match:
            doc_id = metadata_match.group(1)
            situation = metadata_match.group(5)
            speakers = metadata_match.group(3)
            relationship = metadata_match.group(4)

            metadata_str = (f"Situace: {situation}, "
                            f"Počet mluvčích: {speakers}, "
                            f"Vztah: {relationship}")
        else:
            raise ValueError("Metadata not found in document")

        # Initialize an empty list to hold processed document text
        processed_document = [metadata_str]

        # Find all speaker turns within the document
        for sp_match in re.findall(sp_pattern, doc):
            speaker_id = sp_match[0]

            # speaker "Y" marks a non-speech sound; relabel it as "Zvuk" ("sound")
            if speaker_id == "Y":
                speaker_id = "Zvuk"

            sp_content = sp_match[1]
            # strip transcription symbols ("---", "...:", "...") and fix "?." artifacts
            sp_content = sp_content.replace("---", "")
            sp_content = sp_content.replace("...:", "")
            sp_content = sp_content.replace("...", "")
            sp_content = sp_content.replace("?.", "?")


            segs = re.findall(seg_pattern, sp_content)
            if not segs:
                segs = [sp_content]
            # keep only the word form (first tab-separated column) of each line, then join
            tokens = [line.split("\t")[0].strip() for seg in segs for line in seg.split("\n") if line.strip() != ""]
            speaker_text = " ".join(tokens)

            # remove whitespace before ., !, ?
            speaker_text = re.sub(ws_before_punct, r'\1', speaker_text)

            # some oral turns end up empty (e.g. in document 08A009N); drop them
            if speaker_text.strip() == "":
                continue

            # Format the speaker turn and add to the processed document list
            processed_document.append(f"[mluvčí: {speaker_id}] {speaker_text}")


        # Join all speaker turns into a single string for the document
        final_text = '\n'.join(processed_document)
        processed_documents[doc_id] = final_text

    return processed_documents

oral_data = process_vert_format_oral(data)

# merge ortofon and oral data
ortofon_data.update(oral_data)
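# Note: dict.update overwrites entries that share a key, so this assumes the
# ORTOFON and ORAL2013 document ids never collide; if in doubt, check
# `set(ortofon_data) & set(oral_data)` before merging.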

# save the merged data in jsonlines as {"text": doc, "id": doc_id}

import jsonlines
FILE_PATH = ".data/hf_dataset/ortofon_oral/test.jsonl"

os.makedirs(os.path.dirname(FILE_PATH), exist_ok=True)
with jsonlines.open(FILE_PATH, 'w') as writer:
    for doc_id, doc in ortofon_data.items():
        writer.write({"text": doc, "id": doc_id})
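
# Optional verification sketch: re-load the written split with the Hugging Face
# `datasets` library (assumed to be installed) and peek at one record to
# confirm the jsonlines output is well formed.
from datasets import load_dataset

ds = load_dataset("json", data_files={"test": FILE_PATH})["test"]
print(len(ds), ds[0]["id"])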