# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loading script for a North Carolina crime-incident dataset (public police reports)."""

import csv
import json
import logging
import os
from typing import List

import datasets
import pandas as pd
from pyproj import Transformer

# TODO: Replace the placeholder BibTeX entry below with the real citation
# (see arxiv or the dataset repo/website).
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {NC Crime Dataset},
author={huggingface, Inc.
},
year={2024}
}
"""

# Short description surfaced in the dataset card / `DatasetInfo`.
_DESCRIPTION = """\
The dataset, compiled from public police incident reports across various cities in North Carolina, covers a period from the early 2000s through to 2024. It is intended to facilitate the study of crime trends and patterns.
"""

# TODO: Add a link to an official homepage for the dataset here.
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it.
_LICENSE = ""

# TODO: Add link to the official dataset URLs here.
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = ""
_URLS = ""


class NCCrimeDataset(datasets.GeneratorBasedBuilder):
    """Dataset for North Carolina Crime Incidents (currently Cary and Durham).

    Each city feed is downloaded, normalized into one shared schema
    (year / city / category / detail / coordinates / time / status /
    address / notes), concatenated, and exposed as a single TRAIN split.
    """

    _URLS = _URLS
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the feature schema shared by every city's records."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "year": datasets.Value("int64"),
                    "city": datasets.Value("string"),
                    "crime_major_category": datasets.Value("string"),
                    "crime_detail": datasets.Value("string"),
                    "latitude": datasets.Value("float64"),
                    "longitude": datasets.Value("float64"),
                    # NOTE: "occurance" is a typo, but it is part of the public
                    # schema — renaming it would break downstream consumers.
                    "occurance_time": datasets.Value("string"),
                    "clear_status": datasets.Value("string"),
                    "incident_address": datasets.Value("string"),
                    "notes": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,  # was previously omitted from DatasetInfo
            license=_LICENSE,  # was previously omitted from DatasetInfo
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download each city's feed, normalize it, and merge into one TRAIN split."""
        # Cary: official open-data CSV export.
        cary_path = dl_manager.download_and_extract(
            "https://data.townofcary.org/api/explore/v2.1/catalog/datasets/cpd-incidents/exports/csv?lang=en&timezone=US%2FEastern&use_labels=true&delimiter=%2C"
        )
        # Durham: ArcGIS-hosted Excel workbook.
        durham_path = dl_manager.download_and_extract(
            "https://www.arcgis.com/sharing/rest/content/items/7132216432df4957830593359b0c4030/data"
        )
        # TODO: Raleigh and Chapel Hill feeds are not wired up yet. Their
        # Google Drive export URLs (previously dead code in this method):
        #   Raleigh:     https://drive.google.com/uc?export=download&id=19cZzyedCLUtQt9Ko4bcOixWIJHBn9CfI
        #   Chapel Hill: https://drive.google.com/uc?export=download&id=1SZi4e01TxwuDDb6k9EU_7i-qTP1Xq2sm
        # When added, extend the concat below with their preprocessed frames.

        cary_df = self._preprocess_cary(cary_path)
        durham_df = self._preprocess_durham(durham_path)
        combined_df = pd.concat([cary_df, durham_df], ignore_index=True)

        # Persist the merged frame so _generate_examples can stream it back.
        # NOTE(review): assumes this datasets version exposes
        # `dl_manager.download_dir` — confirm; some releases only expose the
        # cache directory via `dl_manager.download_config.cache_dir`.
        combined_file_path = os.path.join(dl_manager.download_dir, "combined_dataset.csv")
        combined_df.to_csv(combined_file_path, index=False)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": combined_file_path},
            )
        ]

    def _preprocess_durham(self, file_path):
        """Normalize the Durham Excel feed into the shared schema.

        FIXME: the frame construction below was left as a placeholder
        ("Your DataFrame creation code") in the original script, so this
        method currently builds an empty frame and the `year` filter at the
        end raises KeyError at runtime. It must be completed against the
        actual Durham column schema before this city can be included.
        """
        Durham = pd.read_excel(file_path)

        # Treat placeholder weapon labels as missing.
        # BUGFIX: the original `.replace([...], None)` does NOT set NaN — with
        # value=None pandas falls back to method="pad" (forward fill), silently
        # propagating the previous row's weapon. Replace with NaN explicitly.
        Durham['Weapon'] = Durham['Weapon'].replace(
            ['(blank)', 'Not Applicable/None', 'Unknown/Not Stated'], float("nan")
        )

        # Raw Durham offense strings grouped into the shared major categories.
        category_mapping = {
            'Theft': ['LARCENY - AUTOMOBILE PARTS OR ACCESSORIES', 'TOWED/ABANDONED VEHICLE',
                      'MOTOR VEHICLE THEFT', 'BURGLARY', 'LARCENY - FROM MOTOR VEHICLE',
                      'LARCENY - SHOPLIFTING', 'LOST PROPERTY', 'VANDALISM', 'LARCENY - ALL OTHER',
                      'LARCENY - FROM BUILDING', 'RECOVERED STOLEN PROPERTY (OTHER JURISDICTION)',
                      'LARCENY - POCKET-PICKING', 'LARCENY - FROM COIN-OPERATED DEVICE',
                      'LARCENY - PURSESNATCHING'],
            'Fraud': ['FRAUD-IDENTITY THEFT', 'EMBEZZLEMENT', 'COUNTERFEITING/FORGERY',
                      'FRAUD - CONFIDENCE GAMES/TRICKERY', 'FRAUD - CREDIT CARD/ATM',
                      'FRAUD - UNAUTHORIZED USE OF CONVEYANCE', 'FRAUD - FALSE PRETENSE',
                      'FRAUD - IMPERSONATION', 'FRAUD - WIRE/COMPUTER/OTHER ELECTRONIC',
                      'FRAUD - WORTHLESS CHECKS', 'FRAUD-FAIL TO RETURN RENTAL VEHICLE',
                      'FRAUD-HACKING/COMPUTER INVASION', 'FRAUD-WELFARE FRAUD'],
            'Assault': ['SIMPLE ASSAULT', 'AGGRAVATED ASSAULT'],
            'Drugs': ['DRUG/NARCOTIC VIOLATIONS', 'DRUG EQUIPMENT/PARAPHERNALIA'],
            'Sexual Offenses': ['SEX OFFENSE - FORCIBLE RAPE', 'SEX OFFENSE - SEXUAL ASSAULT WITH AN OBJECT',
                                'SEX OFFENSE - FONDLING', 'SEX OFFENSE - INDECENT EXPOSURE',
                                'SEX OFFENSE - FORCIBLE SODOMY', 'SEX OFFENSE - STATUTORY RAPE',
                                'SEX OFFENSE - PEEPING TOM', 'SEX OFFENSE - INCEST'],
            'Homicide': ['HOMICIDE-MURDER/NON-NEGLIGENT MANSLAUGHTER', 'JUSTIFIABLE HOMICIDE',
                         'HOMICIDE - NEGLIGENT MANSLAUGHTER'],
            'Arson': ['ARSON'],
            'Kidnapping': ['KIDNAPPING/ABDUCTION'],
            'Weapons Violations': ['WEAPON VIOLATIONS'],
            'Traffic Violations': ['ALL TRAFFIC (EXCEPT DWI)'],
            'Disorderly Conduct': ['DISORDERLY CONDUCT', 'DISORDERLY CONDUCT-DRUNK AND DISRUPTIVE',
                                   'DISORDERLY CONDUCT-FIGHTING (AFFRAY)', 'DISORDERLY CONDUCT-UNLAWFUL ASSEMBLY'],
            'Gambling': ['GAMBLING - OPERATING/PROMOTING/ASSISTING', 'GAMBLING - BETTING/WAGERING'],
            'Animal-related Offenses': ['ANIMAL CRUELTY'],
            'Prostitution-related Offenses': ['PROSTITUTION', 'PROSTITUTION - ASSISTING/PROMOTING',
                                              'PROSTITUTION - PURCHASING']
        }

        def categorize_crime(crime):
            """Map a raw Durham offense string to a major category."""
            for category, crimes in category_mapping.items():
                if crime in crimes:
                    return category
            return 'Miscellaneous'

        # PERF: build the transformer once. The original constructed a new
        # Transformer inside the per-row apply lambda — one per record.
        transformer = Transformer.from_crs("epsg:2264", "epsg:4326", always_xy=True)

        def convert_coordinates(x, y):
            """Convert NC State Plane (EPSG:2264) feet to WGS84 (lat, lon)."""
            lon, lat = transformer.transform(x, y)
            return pd.Series([lat, lon])

        # FIXME: placeholder from the original script — the shared-schema
        # columns (year, city, crime_major_category, ...) are never populated.
        Durham_new = pd.DataFrame({
            # Your DataFrame creation code
        })

        # Convert coordinates and round/fill missing values.
        Durham_new[['latitude', 'longitude']] = (
            Durham.apply(lambda row: convert_coordinates(row['X'], row['Y']), axis=1)
            .round(5)
            .fillna(0)
        )

        # Keep 2015+ records only; raises KeyError until `year` is populated above.
        Durham_new = Durham_new[Durham_new['year'] >= 2015].fillna("No Data")
        return Durham_new

    def _preprocess_cary(self, file_path):
        """Normalize the Cary CSV feed into the shared schema."""
        # Rows without a Year cannot be filtered or cast below, so drop them up front.
        df = pd.read_csv(file_path, low_memory=False).dropna(subset=['Year'])

        def categorize_crime(crime):
            """Map a raw Cary crime-category string to a major category."""
            crime_mapping = {
                'Theft': ['BURGLARY', 'MOTOR VEHICLE THEFT', 'LARCENY'],
                'Arson': ['ARSON'],
                'Assault': ['AGGRAVATED ASSAULT'],
                'Homicide': ['MURDER'],
                'Robbery': ['ROBBERY']
            }
            for category, crimes in crime_mapping.items():
                if crime in crimes:
                    return category
            return 'Miscellaneous'

        processed_df = pd.DataFrame({
            "year": df["Year"].astype(int),
            "city": "Cary",
            "crime_major_category": df['Crime Category'].apply(categorize_crime).str.title(),
            "crime_detail": df['Crime Type'].str.title(),
            # Fill before rounding; the trailing second .fillna(0) in the
            # original was redundant (round() cannot reintroduce NaN).
            "latitude": df['Lat'].fillna(0).round(5),
            "longitude": df['Lon'].fillna(0).round(5),
            "occurance_time": pd.to_datetime(
                df['Begin Date Of Occurrence'] + ' ' + df['Begin Time Of Occurrence']
            ).dt.strftime('%Y/%m/%d %H:%M:%S'),
            # Cary's feed carries no clearance information.
            "clear_status": None,
            "incident_address": df['Geo Code'],
            "notes": 'District: ' + df['District'].str.title()
                     + ' Violent Property: ' + df['Violent Property'].str.title()
        }).fillna("No Data")

        # Keep records from 2015 onwards only, matching the Durham filter.
        processed_df = processed_df[processed_df['year'] >= 2015]
        return processed_df

    def _generate_examples(self, filepath):
        """Stream (key, example) pairs back out of the combined CSV."""
        df = pd.read_csv(filepath)
        for i, row in df.iterrows():
            yield i, {
                "year": int(row["year"]),
                "city": row["city"],
                "crime_major_category": row["crime_major_category"],
                "crime_detail": row["crime_detail"],
                "latitude": float(row["latitude"]),
                "longitude": float(row["longitude"]),
                "occurance_time": row["occurance_time"],
                "clear_status": row["clear_status"],
                "incident_address": row["incident_address"],
                "notes": row["notes"],
            }