# sami_parallel/correct/create_correct_dataset.py
# Author: pere — commit 381bc18 ("first 3 stammer training"), 4.98 kB
from datasets import load_dataset
import random
import argparse
import csv
import glob
import pandas as pd
from sklearn.model_selection import train_test_split
def main(args):
trans_chars = ",.:;!"
filelist = glob.glob('../langid/sentences/*.txt')
data = pd.DataFrame()
for tsvfile in filelist:
print(f"Processing {tsvfile}")
tmp = pd.read_csv(tsvfile, sep='\t',on_bad_lines='skip',engine='python',encoding='utf8')
if len(tmp.columns)==1:
tmp.insert(0,'id','')
tmp.columns=['id','source']
data=pd.concat([data,tmp])
# Trim
data['source'] = data['source'].str.strip()
# Drop rows that does not end with punctation
data = data[data['source'].str[-1:].isin([".",",",":",";","!"])]
# For not creating chaos later with . . . Just remove examples with elipsis
data = data[~data['source'].str.contains("...", regex=False)]
data = data[~data['source'].str.contains(". . .", regex=False)]
#Drop the id
data = data.drop(['id'],axis=1)
# Duplicate column
data['target'] = data['source']
# Apply each method to 10% of the corpus
# set a stop
stop =int(len(data)/10)
stop_small = int(stop/2)
#Main shuffling
data = data.sample(frac=1).reset_index(drop=True)
# Lowercase in 10% of the cases
data.loc[:stop,'source'] = data['source'].str.lower()
print(f"Lower casing 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Uppercase in 5% of the cases
data.loc[:stop_small,'source'] = data['source'].str.upper()
print(f"Upper casing 5% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove all spaces in 10% of the cases
data.loc[:stop,'source'] = data['source'].str.replace(" ","")
print(f"Removing space 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove both spaces and do lowercasing in 10%
data.loc[:stop,'source'] = data['source'].str.replace(" ","")
data.loc[:stop,'source'] = data['source'].str.lower()
print(f"Removing space and doing lovercase 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove a random number of spaces in 10% of the cases
for index, row in data[0:stop].iterrows():
source = row['source']
#Find the spaces
spacepos = [pos for pos, char in enumerate(source) if char == " "]
random.shuffle(spacepos)
#Reduce to a random number
spacepos = spacepos[0:random.randint(0,len(spacepos))]
##Sort in reverse order
spacepos.sort(reverse=True)
##Loop and replace
for s in spacepos:
source = source[:s] + source[s+1:]
data.loc[index,'source'] = source
print(f"Removing a random number of spaces 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove all punctation in 10% of the cases
trans_table = source.maketrans("", "", trans_chars)
data.loc[:stop,'source'] = data['source'].str.translate(trans_table)
print(f"Removing punctation 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
# Remove a random number of commas in 10% of the cases
for index, row in data[0:stop].iterrows():
source = row['source']
#Find the spaces
spacepos = [pos for pos, char in enumerate(source) if char == ", "]
random.shuffle(spacepos)
#Reduce to a random number
spacepos = spacepos[0:random.randint(0,len(spacepos))]
##Sort in reverse order
spacepos.sort(reverse=True)
##Loop and replace
for s in spacepos:
source = source[:s] + " " +source[s+1:]
data.loc[index,'source'] = source
print(f"Removing a random number of commas 10% - '{data.loc[0]['target']}' -> '{data.loc[0]['source']}'")
data = data.sample(frac=1).reset_index(drop=True)
data.loc[:,'source'] = "correct: "+data['source']
# Train - test - dev
train, test = train_test_split(data, test_size=0.2)
test, dev = train_test_split(test, test_size=0.5)
# Write the datasets to disk
train.to_csv('correct_datafiles/correct_train.tsv', index=False, header=False, sep='\t')
test.to_csv('correct_datafiles/correct_test.tsv', index=False, header=False, sep='\t')
dev.to_csv('correct_datafiles/correct_dev.tsv', index=False, header=False, sep='\t')
def parse_args(argv=None):
    """Parse command-line arguments.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse; ``None`` (the default) parses
        ``sys.argv[1:]``. Accepting an explicit list keeps the
        function testable and is backward compatible.

    Returns
    -------
    argparse.Namespace
        The parsed (currently empty) argument namespace.
    """
    parser = argparse.ArgumentParser()
    args = parser.parse_args(argv)
    return args
# Script entry point: parse the (currently empty) CLI and run the
# dataset-building pipeline.
if __name__ == "__main__":
    main(parse_args())