|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
from typing import List |
|
import datasets |
|
import logging |
|
import pandas as pd |
|
from pyproj import Transformer |
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {NC Crime Dataset}, |
|
author={huggingface, Inc. |
|
}, |
|
year={2024} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
The dataset, compiled from public police incident reports across various cities in North Carolina, covers a period from the early 2000s through to 2024. It is intended to facilitate the study of crime trends and patterns. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
|
_URL = "" |
|
_URLS = "" |
|
|
|
class NCCrimeDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for North Carolina crime incidents (Cary and Durham).

    Downloads each city's public incident export, normalizes both into one
    shared per-incident schema, concatenates them into a single CSV, and
    yields one example per incident as the ``train`` split.
    """

    # Kept for backward compatibility with any external reference; the
    # download URLs themselves are hard-coded in _split_generators.
    _URLS = _URLS

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the unified per-incident schema shared by all cities."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "year": datasets.Value("int64"),
                    "city": datasets.Value("string"),
                    "crime_major_category": datasets.Value("string"),
                    "crime_detail": datasets.Value("string"),
                    "latitude": datasets.Value("float64"),
                    "longitude": datasets.Value("float64"),
                    # "occurance_time" is a misspelling of "occurrence_time",
                    # but it is the published feature name; renaming it would
                    # break existing consumers.
                    "occurance_time": datasets.Value("string"),
                    "clear_status": datasets.Value("string"),
                    "incident_address": datasets.Value("string"),
                    "notes": datasets.Value("string"),
                }
            ),
            # FIX: _HOMEPAGE and _LICENSE were defined at module level but
            # never passed through; wire them up so card metadata is complete.
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download both city exports, merge them, and emit one train split.

        Args:
            dl_manager: datasets download manager used to fetch the exports.

        Returns:
            A single-element list with the TRAIN split generator pointing at
            the combined CSV file.
        """
        cary_path = dl_manager.download_and_extract(
            "https://data.townofcary.org/api/explore/v2.1/catalog/datasets/cpd-incidents/exports/csv?lang=en&timezone=US%2FEastern&use_labels=true&delimiter=%2C"
        )
        durham_path = dl_manager.download_and_extract(
            "https://www.arcgis.com/sharing/rest/content/items/7132216432df4957830593359b0c4030/data"
        )

        cary_df = self._preprocess_cary(cary_path)
        durham_df = self._preprocess_durham(durham_path)

        combined_df = pd.concat([cary_df, durham_df], ignore_index=True)

        # NOTE(review): assumes the pinned `datasets` version exposes
        # DownloadManager.download_dir — confirm, or switch to
        # dl_manager.download_config.cache_dir if it does not.
        combined_file_path = os.path.join(dl_manager.download_dir, "combined_dataset.csv")
        combined_df.to_csv(combined_file_path, index=False)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": combined_file_path}
            )
        ]

    def _preprocess_durham(self, file_path):
        """Normalize the Durham Excel export into the shared schema.

        Args:
            file_path: local path to the downloaded Durham .xlsx export.

        Returns:
            DataFrame with the shared schema columns, restricted to
            incidents from 2015 onward; remaining NaNs become "No Data".
        """
        Durham = pd.read_excel(file_path)

        # Collapse the various "no weapon recorded" spellings into a real
        # missing value so the notes column renders consistently.
        Durham['Weapon'] = Durham['Weapon'].replace(['(blank)', 'Not Applicable/None', 'Unknown/Not Stated'], None)

        # Raw Durham offense strings -> shared major categories.
        category_mapping = {
            'Theft': ['LARCENY - AUTOMOBILE PARTS OR ACCESSORIES', 'TOWED/ABANDONED VEHICLE', 'MOTOR VEHICLE THEFT', 'BURGLARY', 'LARCENY - FROM MOTOR VEHICLE', 'LARCENY - SHOPLIFTING', 'LOST PROPERTY', 'VANDALISM', 'LARCENY - ALL OTHER', 'LARCENY - FROM BUILDING', 'RECOVERED STOLEN PROPERTY (OTHER JURISDICTION)', 'LARCENY - POCKET-PICKING', 'LARCENY - FROM COIN-OPERATED DEVICE', 'LARCENY - PURSESNATCHING'],
            'Fraud': ['FRAUD-IDENTITY THEFT', 'EMBEZZLEMENT', 'COUNTERFEITING/FORGERY', 'FRAUD - CONFIDENCE GAMES/TRICKERY', 'FRAUD - CREDIT CARD/ATM', 'FRAUD - UNAUTHORIZED USE OF CONVEYANCE', 'FRAUD - FALSE PRETENSE', 'FRAUD - IMPERSONATION', 'FRAUD - WIRE/COMPUTER/OTHER ELECTRONIC', 'FRAUD - WORTHLESS CHECKS', 'FRAUD-FAIL TO RETURN RENTAL VEHICLE', 'FRAUD-HACKING/COMPUTER INVASION', 'FRAUD-WELFARE FRAUD'],
            'Assault': ['SIMPLE ASSAULT', 'AGGRAVATED ASSAULT'],
            'Drugs': ['DRUG/NARCOTIC VIOLATIONS', 'DRUG EQUIPMENT/PARAPHERNALIA'],
            'Sexual Offenses': ['SEX OFFENSE - FORCIBLE RAPE', 'SEX OFFENSE - SEXUAL ASSAULT WITH AN OBJECT', 'SEX OFFENSE - FONDLING', 'SEX OFFENSE - INDECENT EXPOSURE', 'SEX OFFENSE - FORCIBLE SODOMY', 'SEX OFFENSE - STATUTORY RAPE', 'SEX OFFENSE - PEEPING TOM', 'SEX OFFENSE - INCEST'],
            'Homicide': ['HOMICIDE-MURDER/NON-NEGLIGENT MANSLAUGHTER', 'JUSTIFIABLE HOMICIDE', 'HOMICIDE - NEGLIGENT MANSLAUGHTER'],
            'Arson': ['ARSON'],
            'Kidnapping': ['KIDNAPPING/ABDUCTION'],
            'Weapons Violations': ['WEAPON VIOLATIONS'],
            'Traffic Violations': ['ALL TRAFFIC (EXCEPT DWI)'],
            'Disorderly Conduct': ['DISORDERLY CONDUCT', 'DISORDERLY CONDUCT-DRUNK AND DISRUPTIVE', 'DISORDERLY CONDUCT-FIGHTING (AFFRAY)', 'DISORDERLY CONDUCT-UNLAWFUL ASSEMBLY'],
            'Gambling': ['GAMBLING - OPERATING/PROMOTING/ASSISTING', 'GAMBLING - BETTING/WAGERING'],
            'Animal-related Offenses': ['ANIMAL CRUELTY'],
            'Prostitution-related Offenses': ['PROSTITUTION', 'PROSTITUTION - ASSISTING/PROMOTING', 'PROSTITUTION - PURCHASING']
        }

        def categorize_crime(crime):
            """Map one raw offense string onto its major category."""
            for category, crimes in category_mapping.items():
                if crime in crimes:
                    return category
            return 'Miscellaneous'

        # PERF FIX: the Transformer was previously constructed inside the
        # per-row conversion function, i.e. once per incident; build it once.
        transformer = Transformer.from_crs("epsg:2264", "epsg:4326", always_xy=True)

        def convert_coordinates(x, y):
            """Convert NC State Plane (EPSG:2264) x/y to WGS84 (lat, lon)."""
            lon, lat = transformer.transform(x, y)
            return pd.Series([lat, lon])

        # BUG FIX: Durham_new was previously created as an EMPTY DataFrame,
        # so the later reference to Durham_new['year'] raised KeyError and
        # no schema columns were ever populated. Populate the shared schema
        # from the raw export here.
        # NOTE(review): the raw column names below ('Report Date', 'Offense',
        # 'Status', 'Address') are assumed from the Durham PD export and the
        # category_mapping above — confirm against the downloaded file.
        report_date = pd.to_datetime(Durham['Report Date'], errors='coerce')
        Durham_new = pd.DataFrame({
            "year": report_date.dt.year,
            "city": "Durham",
            "crime_major_category": Durham['Offense'].apply(categorize_crime).str.title(),
            "crime_detail": Durham['Offense'].str.title(),
            "occurance_time": report_date.dt.strftime('%Y/%m/%d %H:%M:%S'),
            "clear_status": Durham['Status'],
            "incident_address": Durham['Address'],
            "notes": 'Weapon: ' + Durham['Weapon'],
        })

        # Index-aligned with Durham_new (built from the same frame), so the
        # row-wise coordinate conversion lines up one-to-one.
        Durham_new[['latitude', 'longitude']] = (
            Durham.apply(lambda row: convert_coordinates(row['X'], row['Y']), axis=1)
            .round(5)
            .fillna(0)
        )

        # Keep only incidents from 2015 onward; remaining gaps -> "No Data".
        Durham_new = Durham_new[Durham_new['year'] >= 2015].fillna("No Data")

        return Durham_new

    def _preprocess_cary(self, file_path):
        """Normalize the Town of Cary CSV export into the shared schema.

        Args:
            file_path: local path to the downloaded Cary CSV export.

        Returns:
            DataFrame with the shared schema columns, restricted to
            incidents from 2015 onward; NaNs become "No Data".
        """
        # Rows without a Year cannot be placed on the timeline; drop them.
        df = pd.read_csv(file_path, low_memory=False).dropna(subset=['Year'])

        def categorize_crime(crime):
            """Map one raw Cary crime-category string onto a major category."""
            crime_mapping = {
                'Theft': ['BURGLARY', 'MOTOR VEHICLE THEFT', 'LARCENY'],
                'Arson': ['ARSON'],
                'Assault': ['AGGRAVATED ASSAULT'],
                'Homicide': ['MURDER'],
                'Robbery': ['ROBBERY']
            }
            for category, crimes in crime_mapping.items():
                if crime in crimes:
                    return category
            return 'Miscellaneous'

        processed_df = pd.DataFrame({
            "year": df["Year"].astype(int),
            "city": "Cary",
            "crime_major_category": df['Crime Category'].apply(categorize_crime).str.title(),
            "crime_detail": df['Crime Type'].str.title(),
            # FIX: fillna(0) was previously applied both before and after
            # round(); a single application after rounding is equivalent.
            "latitude": df['Lat'].round(5).fillna(0),
            "longitude": df['Lon'].round(5).fillna(0),
            "occurance_time": pd.to_datetime(df['Begin Date Of Occurrence'] + ' ' + df['Begin Time Of Occurrence']).dt.strftime('%Y/%m/%d %H:%M:%S'),
            # The Cary export carries no clearance information.
            "clear_status": None,
            "incident_address": df['Geo Code'],
            "notes": 'District: '+ df['District'].str.title() + ' Violent Property: ' + df['Violent Property'].str.title()
        }).fillna("No Data")

        # Keep only incidents from 2015 onward so both cities cover the
        # same period.
        processed_df = processed_df[processed_df['year'] >= 2015]

        return processed_df

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from the combined CSV.

        Args:
            filepath: path to the combined CSV written by _split_generators.
        """
        df = pd.read_csv(filepath)

        for i, row in df.iterrows():
            yield i, {
                "year": int(row["year"]),
                "city": row["city"],
                "crime_major_category": row["crime_major_category"],
                "crime_detail": row["crime_detail"],
                "latitude": float(row["latitude"]),
                "longitude": float(row["longitude"]),
                "occurance_time": row["occurance_time"],
                "clear_status": row["clear_status"],
                "incident_address": row["incident_address"],
                "notes": row["notes"],
            }
|
|