# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NC Crime dataset loading script: public police incident reports from North Carolina cities."""
import os
from typing import List

import datasets
import pandas as pd
_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {NC Crime Dataset},
  author = {huggingface, Inc.},
  year   = {2024}
}
"""
_DESCRIPTION = """\
The dataset is compiled from public police incident reports across several cities in North Carolina and covers the period from the early 2000s through 2024. It is intended to facilitate the study of crime trends and patterns.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = ""
_URLS = ""
class NCCrimeDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for North Carolina crime incidents."""

    VERSION = datasets.Version("1.0.0")
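
    # Each city extract is normalized by the per-city preprocessors into the shared
    # schema declared in `_info` below; missing values are filled with the string
    # "No Data" before the combined CSV is written.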
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "year": datasets.Value("int64"),
                    "city": datasets.Value("string"),
                    "crime_major_category": datasets.Value("string"),
                    "crime_detail": datasets.Value("string"),
                    "latitude": datasets.Value("float64"),
                    "longitude": datasets.Value("float64"),
                    "occurance_time": datasets.Value("string"),
                    "clear_status": datasets.Value("string"),
                    "incident_address": datasets.Value("string"),
                    "notes": datasets.Value("string"),
                }
            ),
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Download the raw incident exports for each city that is currently enabled.
        cary_path = dl_manager.download_and_extract(
            "https://data.townofcary.org/api/explore/v2.1/catalog/datasets/cpd-incidents/exports/csv?lang=en&timezone=US%2FEastern&use_labels=true&delimiter=%2C"
        )
        chapel_hill_path = dl_manager.download_and_extract(
            "https://drive.google.com/uc?export=download&id=19cZzyedCLUtQt9Ko4bcOixWIJHBn9CfI"
        )
        # Durham and Raleigh sources are currently disabled:
        # durham_path = dl_manager.download_and_extract("https://www.arcgis.com/sharing/rest/content/items/7132216432df4957830593359b0c4030/data")
        # raleigh_path = dl_manager.download_and_extract("https://drive.google.com/uc?export=download&id=1SZi4e01TxwuDDb6k9EU_7i-qTP1Xq2sm")

        # Normalize each city into the shared schema and combine the results.
        cary_df = self._preprocess_cary(cary_path)
        chapel_hill_df = self._preprocess_chapel_hill(chapel_hill_path)
        # durham_df = self._preprocess_durham(durham_path)
        # raleigh_df = self._preprocess_raleigh(raleigh_path)
        combined_df = pd.concat([cary_df, chapel_hill_df], ignore_index=True)

        # Write the combined table to a single CSV that `_generate_examples` reads back.
        combined_file_path = os.path.join(dl_manager.download_dir, "combined_dataset.csv")
        combined_df.to_csv(combined_file_path, index=False)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": combined_file_path})
        ]
    def _preprocess_chapel_hill(self, file_path):
        # Load the dataset
        Chapel = pd.read_csv(file_path, low_memory=False)
        # Replace placeholder weapon values with None
        replace_values = {'<Null>': None, 'NONE': None}
        Chapel['Weapon_Description'] = Chapel['Weapon_Description'].replace(replace_values)
        # Define the category mapping
        category_mapping = {
            'Theft': ['THEFT/LARCENY', 'LARCENY FROM AU', 'LARCENY FROM PE', 'LARCENY OF OTHE', 'LARCENY FROM BU', 'LARCENY OF BIKE', 'LARCENY FROM RE', 'LARCENY OF AUTO'],
            'Assault': ['ASSAULT/SEXUAL', 'ASSAULT', 'STAB GUNSHOT PE', 'ACTIVE ASSAILAN'],
            'Burglary': ['BURGLARY', 'BURGLARY ATTEMP', 'STRUCTURE COLLAPSE', 'ROBBERY/CARJACK'],
            'Drugs': ['DRUGS'],
            'Traffic Violations': ['TRAFFIC STOP', 'TRAFFIC/TRANSPO', 'TRAFFIC VIOLATI', 'MVC', 'MVC W INJURY', 'MVC W INJURY AB', 'MVC W INJURY DE', 'MVC ENTRAPMENT'],
            'Disorderly Conduct': ['DISTURBANCE/NUI', 'DOMESTIC DISTUR', 'DISPUTE', 'DISTURBANCE', 'LOST PROPERTY', 'TRESPASSING/UNW', 'REFUSAL TO LEAV', 'SUSPICIOUS COND', 'STRUCTURE FIRE'],
            'Fraud': ['FRAUD OR DECEPT'],
            'Sexual Offenses': ['SEXUAL OFFENSE'],
            'Homicide': ['SUICIDE ATTEMPT', 'ABUSE/ABANDOMEN', 'DECEASED PERSON'],
            'Weapons Violations': ['WEAPON/FIREARMS'],
            'Animal-related Offenses': ['ANIMAL BITE', 'ANIMAL', 'ANIMAL CALL'],
            'Missing Person': ['MISSING PERSON'],
            'Public Service': ['PUBLIC SERVICE', 'PUBLICE SERVICE'],
            'Miscellaneous': ['<Null>', 'SUSPICIOUS/WANT', 'MISC OFFICER IN', 'INDECENCY/LEWDN', 'PUBLIC SERVICE', 'TRESPASSING', 'UNKNOWN PROBLEM', 'LOUD NOISE', 'ESCORT', 'ABDUCTION/CUSTO', 'THREATS', 'BURGLAR ALARM', 'DOMESTIC', 'PROPERTY FOUND', 'FIREWORKS', 'MISSING/RUNAWAY', 'MENTAL DISORDER', 'CHECK WELL BEIN', 'PSYCHIATRIC', 'OPEN DOOR', 'ABANDONED AUTO', 'HARASSMENT THRE', 'JUVENILE RELATE', 'ASSIST MOTORIST', 'HAZARDOUS DRIVI', 'MVC', 'GAS LEAK FIRE', 'ASSIST OTHER AG', 'DOMESTIC ASSIST', 'SUSPICIOUS VEHI', 'UNKNOWN LE', 'ALARMS', '911 HANGUP', 'BOMB/CBRN/PRODU', 'STATIONARY PATR', 'LITTERING', 'HOUSE CHECK', 'CARDIAC', 'CLOSE PATROL', 'BOMB FOUND/SUSP', 'INFO FOR ALL UN', 'UNCONCIOUS OR F', 'LIFTING ASSISTA', 'ATTEMPT TO LOCA', 'SICK PERSON', 'HEAT OR COLD EX', 'CONFINED SPACE', 'TRAUMATIC INJUR', 'DROWNING', 'CITY ORDINANCE']
        }
        # Function to categorize crime
        def categorize_crime(crime):
            for category, crimes in category_mapping.items():
                if crime in crimes:
                    return category
            return 'Miscellaneous'
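        # For example, categorize_crime('DRUGS') returns 'Drugs', while any code not
        # listed in category_mapping falls back to 'Miscellaneous'.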
        # Create a new DataFrame with simplified crime categories
        Chapel_new = pd.DataFrame({
            "year": pd.to_datetime(Chapel['Date_of_Occurrence']).dt.year,
            "city": "Chapel Hill",
            "crime_major_category": Chapel['Reported_As'].apply(categorize_crime),
            "crime_detail": Chapel['Offense'].str.title(),
            "latitude": Chapel['X'].round(5).fillna(0),
            "longitude": Chapel['Y'].round(5).fillna(0),
            "occurance_time": pd.to_datetime(Chapel['Date_of_Occurrence'].str.replace(r'\+\d{2}$', '', regex=True)).dt.strftime('%Y/%m/%d %H:%M:%S'),
            "clear_status": None,
            "incident_address": Chapel['Street'].str.replace("@", " "),
            "notes": Chapel['Weapon_Description'].apply(lambda x: f"Weapon: {x}" if pd.notnull(x) else "Weapon: None").str.title()
        }).fillna("No Data")
        # Some source rows have latitude and longitude swapped (for North Carolina,
        # latitude falls roughly between 30 and 40 and longitude between -80 and -70),
        # so swap those pairs back before filtering.
        swapped = (Chapel_new['latitude'].between(-80, -70)) & (Chapel_new['longitude'].between(30, 40))
        Chapel_new.loc[swapped, ['latitude', 'longitude']] = Chapel_new.loc[swapped, ['longitude', 'latitude']].values
        # Keep only coordinates within the expected range
        Chapel_new = Chapel_new.loc[(Chapel_new['latitude'].between(30, 40)) & (Chapel_new['longitude'].between(-80, -70))]
        # Filter for years 2015 and onwards
        Chapel_new = Chapel_new[Chapel_new['year'] >= 2015]
        return Chapel_new
    def _preprocess_cary(self, file_path):
        # Load the dataset and drop rows without a year
        df = pd.read_csv(file_path, low_memory=False).dropna(subset=['Year'])
        # Define the crime categorization function
        def categorize_crime(crime):
            crime_mapping = {
                'Theft': ['BURGLARY', 'MOTOR VEHICLE THEFT', 'LARCENY'],
                'Arson': ['ARSON'],
                'Assault': ['AGGRAVATED ASSAULT'],
                'Homicide': ['MURDER'],
                'Robbery': ['ROBBERY']
            }
            for category, crimes in crime_mapping.items():
                if crime in crimes:
                    return category
            return 'Miscellaneous'
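        # Note: in this Cary mapping 'BURGLARY' is grouped under 'Theft', unlike the
        # Chapel Hill mapping above, where burglary codes get their own 'Burglary' category.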
        # Apply the crime categorization function and map the columns onto the shared schema
        processed_df = pd.DataFrame({
            "year": df["Year"].astype(int),
            "city": "Cary",
            "crime_major_category": df['Crime Category'].apply(categorize_crime).str.title(),
            "crime_detail": df['Crime Type'].str.title(),
            "latitude": df['Lat'].round(5).fillna(0),
            "longitude": df['Lon'].round(5).fillna(0),
            "occurance_time": pd.to_datetime(df['Begin Date Of Occurrence'] + ' ' + df['Begin Time Of Occurrence']).dt.strftime('%Y/%m/%d %H:%M:%S'),
            "clear_status": None,
            "incident_address": df['Geo Code'],
            "notes": 'District: ' + df['District'].str.title() + ' Violent Property: ' + df['Violent Property'].str.title()
        }).fillna("No Data")
        # Filter the dataset for records from 2015 onwards
        processed_df = processed_df[processed_df['year'] >= 2015]
        return processed_df
    def _generate_examples(self, filepath):
        # Read the combined CSV file
        df = pd.read_csv(filepath)
        # Iterate over the rows and yield one example per incident
        for i, row in df.iterrows():
            yield i, {
                "year": int(row["year"]),
                "city": row["city"],
                "crime_major_category": row["crime_major_category"],
                "crime_detail": row["crime_detail"],
                "latitude": float(row["latitude"]),
                "longitude": float(row["longitude"]),
                "occurance_time": row["occurance_time"],
                "clear_status": row["clear_status"],
                "incident_address": row["incident_address"],
                "notes": row["notes"],
            }
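

# Example usage -- a minimal sketch, assuming this script is hosted in a Hugging Face
# dataset repository (the repo id below is an assumption; adjust it to wherever the
# script actually lives):
#
#     from datasets import load_dataset
#
#     nc_crime = load_dataset("zwn22/NC_Crime", split="train", trust_remote_code=True)
#     print(nc_crime[0])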