Datasets:
Tasks: Text Classification
Modalities: Text
Sub-tasks: hate-speech-detection
Languages: English
Size: 100K - 1M
License:
import re
import math
import pandas as pd
from tqdm import tqdm
seed = 7497  # Fixed seed so the train/validation shuffle is reproducible
TOXIC_COLUMNS = [
    "toxic",
    "severe_toxic",
    "obscene",
    "threat",
    "insult",
    "identity_hate",
]
# Time and date regexes
TIME = r"([0-9]{1,2}:[0-9]{2}( (am|AM|pm|PM))?)"
DAY = r"([23]?(1(st)?|2(nd)?|3(rd)?|[4-9](th)?)|1[0-9](th)?)"
MONTH = r"(January|February|March|April|May|June|July|August|September|October|November|December|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Nov|Dec)"
YEAR = r"('?[0-9]{2}|[0-9]{4})"
DATE = rf"(({DAY} {MONTH}|{MONTH} {DAY})(,? {YEAR})?)"
TIMESTAMP = rf"((({TIME},? (\(UTC\) )?)?{DATE}|({DATE},? )?{TIME})(\s+\(UTC\))?)"
# The 'talk' part at the end of a signature
TALK = r"((\|\s*|\(\s*)?[tT]alk((\s*[-|β’, ]\s*|\s+)[cC]ontribs)?(\s*[-|)])?)"
# IP addresses
IP = r"([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})"
# Username and the username part of the signature
USERNAME = r"([^#<>[\]|{}/@\s]+)"
USER_SIG = rf"((((?:\s)[-—–]\s*)?(\((User:)?{USERNAME}\)|User:{USERNAME})|(?:\s)[-—–]\s*{USERNAME})(\s+{TALK})?)"
# A full signature
SIGNATURE = rf"(((([-ββ]\s*)?{IP}(\s+{USER_SIG})?|(?:\s)[-ββ]\s*[uU]nsigned|{TALK}|{USER_SIG})(\s+{TIMESTAMP})?)|{TIMESTAMP}(\s+{TALK})?)"
# List of the patterns to remove
REGEX_REMOVE = [
    r"^(\"+|'+)",  # Initial quotation marks
    r"(\"+|'+)$",  # Final quotation marks
    r"^REDIRECT.*$",  # The whole comment is a redirect
    rf"^\s*{SIGNATURE}",  # Initial signature
    rf"{SIGNATURE}\s*$",  # Final signature
    r" \[[0-9]+\]|\[[0-9]+\] ",  # Citations
    r"—\s+[tT]alk - [-a-zA-Z0-9._()\s]+—",
    r"==[^=]+==",  # Section headings
    r"^::+",  # Leading talk-page indentation markers
    r"^\s*\(UTC\)",  # Stray timestamp fragments
    rf"Unblock {IP}",
    r"2nd Unblock Request",
    r":Category:",
    r"File:[^\s]+",  # File links
    r"\{\|.+\|\}",  # Embedded code
    # r"\{\{.+\s.+\}\}",  # Embedded code
    r"^\s+",  # Initial whitespace
    r"\s+$",  # Trailing whitespace
]
# List of patterns to replace
REGEX_REPLACE = {
    "\n+": "\n",  # Collapse repeated newlines
    r"\\'": "'",  # Unescape quotes
    '""+': '"',  # Collapse repeated double quotes
    "''+": "'",  # Collapse repeated single quotes
    # r"(WP|Wikipedia):[^\s]+": "URL",  # Wikipedia internal links
    r"[^\s]+#[^\s]+": "URL",  # Wikipedia internal links
    r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)": "URL",  # URLs
    r"([uU]ser_[tT]alk|[tT]alk):[^\s]+": "URL",  # Talk links
}
def clean_sentence(sentence):
    """Preprocess a sentence using the regex rules"""
    for pattern in REGEX_REMOVE:
        sentence = re.sub(pattern, "", sentence)
    for pattern, repl in REGEX_REPLACE.items():
        sentence = re.sub(pattern, repl, sentence)
    return sentence
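# Illustrative example (not from the original script): a trailing signature
# should be stripped by the SIGNATURE rules and the quotation marks removed,
# e.g.
#   clean_sentence('"Nice work! - User:Example talk 12:34, 5 March 2008 (UTC)"')
# is expected to return 'Nice work!'.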
def make_binary_label(row):
    """Make a row label binary by combining all toxicity types"""
    for column in TOXIC_COLUMNS:
        if row[column] == 1:
            return 1
    return 0
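# Note: assuming the toxicity columns are 0/1 integers, an equivalent
# vectorized form would be df[TOXIC_COLUMNS].max(axis=1); the row-wise
# function above is kept for readability.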
print("Loading original data...")
# Load up the original data
train_df = pd.read_csv("orig_train.csv").set_index("id")
test_text_df = pd.read_csv("orig_test.csv").set_index("id")
test_labels_df = pd.read_csv("orig_test_labels.csv").set_index("id")
# Remove the datapoints which have no label
test_text_df = test_text_df.loc[test_labels_df["toxic"] != -1]
test_labels_df = test_labels_df.loc[test_labels_df["toxic"] != -1]
# Join the test text and labels to make a complete dataset
test_df = test_text_df.join(test_labels_df)
print("Cleaning train split...")
for index, row in tqdm(train_df.iterrows(), total=len(train_df)):
row["comment_text"] = clean_sentence(row["comment_text"])
print("Cleaning test split...")
for index, row in tqdm(test_df.iterrows(), total=len(test_df)):
row["comment_text"] = clean_sentence(row["comment_text"])
# Some texts will get reduced to the empty string. Let's remove them first
print("Removing empty texts...")
train_df = train_df.loc[train_df["comment_text"] != ""]
test_df = test_df.loc[test_df["comment_text"] != ""]
# Get rid of any duplicates we made
print("Removing duplicate entries...")
train_df = train_df.drop_duplicates(subset=["comment_text"])
test_df = test_df.drop_duplicates(subset=["comment_text"])
print("Creating binary column...")
# Make the new binary column
train_df["label"] = train_df.apply(make_binary_label, axis=1)
test_df["label"] = test_df.apply(make_binary_label, axis=1)
# Remove all other classification columns
train_df = train_df.drop(columns=TOXIC_COLUMNS)
test_df = test_df.drop(columns=TOXIC_COLUMNS)
print("Creating eval split...")
# Shuffle the current train split
train_df = train_df.sample(frac=1, random_state=seed)
# The new size of the train split
train_size = math.floor(len(train_df) * 0.8)
# Separate into train and eval splits
eval_df = train_df[train_size:]
train_df = train_df[:train_size]
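# Sanity check (illustrative, not in the original script): the two splits
# should be disjoint by construction, e.g.
#   assert len(set(train_df.index) & set(eval_df.index)) == 0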
# print("Saving to disk...")
with open("train.csv", "w") as f:
train_df.to_csv(f)
with open("validation.csv", "w") as f:
eval_df.to_csv(f)
with open("test.csv", "w") as f:
test_df.to_csv(f) |
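# The resulting CSV splits can then be loaded, for example, with the
# Hugging Face datasets library (illustrative usage, not part of the
# original preprocessing):
#
#     from datasets import load_dataset
#     ds = load_dataset(
#         "csv",
#         data_files={
#             "train": "train.csv",
#             "validation": "validation.csv",
#             "test": "test.csv",
#         },
#     )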