|
import pandas as pd |
|
from datetime import datetime |
|
from uszipcode import SearchEngine |
|
from datasets import load_dataset |
|
|
|
|
|
# --- Collision records ---------------------------------------------------
# Pull the raw NYC collision table (JSON file) from the Hugging Face hub.
dataset_path = "xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset"
data_files1 = ["df1_NYC_collisions_data.json"]
dataset1 = load_dataset(path=dataset_path, data_files=data_files1)

# Everything downstream works on the 'train' split as a pandas DataFrame.
df = dataset1['train'].to_pandas()
|
|
|
|
|
df['CRASH TIME'] = pd.to_datetime(df['CRASH TIME'], format='%H:%M')


def _join_present(row, column_template, count=5):
    """Join the non-null, non-empty values of numbered columns 1..count.

    Fix: the original filtered with bare truthiness (`if row.get(col)`),
    but float NaN is truthy in Python, so missing values loaded as NaN
    would slip through and make ', '.join(...) raise TypeError. We use
    pd.notna(), consistent with the street-name checks below.
    """
    values = []
    for i in range(1, count + 1):
        value = row.get(column_template.format(i))
        if pd.notna(value) and value != '':
            values.append(str(value))
    return ', '.join(values)


# Ordered (source column, label) pairs for assembling STREET NAME / TYPE.
_STREET_COLUMNS = [
    ('ON STREET NAME', 'ON STREET'),
    ('CROSS STREET NAME', 'CROSS STREET'),
    ('OFF STREET NAME', 'OFF STREET'),
]

for index, row in df.iterrows():
    # Bucket the crash time into 3-hour periods, e.g. "06:00-08:59".
    period_start = (row['CRASH TIME'].hour // 3) * 3
    period_end = period_start + 2
    df.at[index, 'CRASH TIME PERIOD'] = f"{period_start:02d}:00-{period_end:02d}:59"

    # Collapse the five per-vehicle factor/type columns into one string each.
    df.at[index, 'CONTRIBUTING FACTOR VEHICLES'] = _join_present(
        row, 'CONTRIBUTING FACTOR VEHICLE {}')
    df.at[index, 'VEHICLE TYPES'] = _join_present(row, 'VEHICLE TYPE CODE {}')

    # Collect whichever street-name fields are populated, plus a parallel
    # list recording which field each name came from.
    street_names = []
    street_types = []
    for column, label in _STREET_COLUMNS:
        value = row[column]
        if pd.notna(value) and value != '':
            street_names.append(value)
            street_types.append(label)
    df.at[index, 'STREET NAME'] = ', '.join(street_names)
    df.at[index, 'STREET TYPE'] = ', '.join(street_types)
|
|
|
|
|
|
|
# Casualty-count columns: coerce to int, mapping bad/missing entries to 0.
numeric_columns = [
    'NUMBER OF PERSONS INJURED', 'NUMBER OF PEDESTRIANS INJURED',
    'NUMBER OF CYCLIST INJURED', 'NUMBER OF MOTORIST INJURED',
    'NUMBER OF PERSONS KILLED', 'NUMBER OF PEDESTRIANS KILLED',
    'NUMBER OF CYCLIST KILLED', 'NUMBER OF MOTORIST KILLED',
]
for column in numeric_columns:
    df[column] = pd.to_numeric(df[column], errors='coerce').fillna(0).astype(int)

# Aggregate totals across the four injured / four killed columns.
# NOTE(review): if 'NUMBER OF PERSONS ...' already includes the pedestrian/
# cyclist/motorist counts upstream, these totals double-count — confirm
# against the source data dictionary.
df['NUMBER OF INJURIES'] = df[numeric_columns[:4]].sum(axis=1)
df['NUMBER OF DEATHS'] = df[numeric_columns[4:]].sum(axis=1)
|
|
|
|
|
# Trim to the analysis columns; the raw per-vehicle and per-street fields
# were folded into derived columns earlier and are no longer needed.
columns_to_keep = [
    'CRASH DATE', 'BOROUGH', 'ZIP CODE', 'LATITUDE', 'LONGITUDE',
    'COLLISION_ID', 'CRASH TIME PERIOD', 'CONTRIBUTING FACTOR VEHICLES',
    'VEHICLE TYPES', 'NUMBER OF INJURIES', 'NUMBER OF DEATHS',
    'STREET NAME', 'STREET TYPE',
]
df = df[columns_to_keep]
|
|
|
|
|
# Reverse-geocode missing ZIP codes from latitude/longitude via uszipcode.
search = SearchEngine()

# Coordinates arrive as strings/mixed types; coerce so 0/NaN checks work.
df['LATITUDE'] = pd.to_numeric(df['LATITUDE'], errors='coerce')
df['LONGITUDE'] = pd.to_numeric(df['LONGITUDE'], errors='coerce')


def _usable_coordinate(value):
    """A coordinate is usable when present and not the 0.0 placeholder."""
    return pd.notna(value) and value != 0


for index, row in df.iterrows():
    zip_code = row['ZIP CODE']
    # Fix: the original guard only matched zip_code == '' and therefore
    # skipped rows whose ZIP CODE is missing (NaN/None) even when valid
    # coordinates were available. Treat null and empty the same way,
    # consistent with the BOROUGH handling below.
    if ((pd.isna(zip_code) or zip_code == '')
            and _usable_coordinate(row['LATITUDE'])
            and _usable_coordinate(row['LONGITUDE'])):
        result = search.by_coordinates(lat=row['LATITUDE'], lng=row['LONGITUDE'], returns=1)
        if result:
            df.at[index, 'ZIP CODE'] = result[0].zipcode
|
|
|
|
|
# --- Borough lookup table --------------------------------------------------
dataset_path = "xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset"
data_files2 = ["df2_NYC_borough_data.json"]
dataset2 = load_dataset(path=dataset_path, data_files=data_files2)
df2 = dataset2['train'].to_pandas()

# The collision table stores boroughs upper-case; normalize to match.
df2['Borough'] = df2['Borough'].str.upper()

# ZIP code -> borough mapping used to backfill missing BOROUGH values.
# NOTE(review): assumes df2's 'ZIP Code' values have the same type/format
# as df's 'ZIP CODE' (a str-vs-int mismatch would make every lookup miss).
zip_to_borough = df2.set_index('ZIP Code')['Borough'].to_dict()
|
|
|
|
|
def update_borough(row):
    """Return the row's borough, backfilled from its ZIP code when missing.

    A borough that is already present is returned unchanged. Otherwise the
    module-level ``zip_to_borough`` mapping is consulted; unknown ZIP codes
    fall back to the original (empty/NaN) borough value.
    """
    borough = row['BOROUGH']
    if pd.notna(borough) and borough != '':
        return borough
    return zip_to_borough.get(row['ZIP CODE'], borough)
|
|
|
|
|
# Backfill missing boroughs row-by-row via the ZIP-code lookup.
df['BOROUGH'] = df.apply(update_borough, axis=1)
|
|
|
|
|
# --- Daily weather -----------------------------------------------------------
dataset_path = "xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset"
data_files3 = ["df3_NYC_weather_data.json"]
dataset3 = load_dataset(path=dataset_path, data_files=data_files3)
df3 = dataset3['train'].to_pandas()

# Keep only the weather fields we join onto collisions, renamed to match
# the collision table's upper-case column convention.
df3 = df3[['datetime', 'description', 'precip', 'preciptype', 'tempmax', 'tempmin']]
df3 = df3.rename(columns={
    'description': 'WEATHER DESCRIPTION',
    'precip': 'PRECIPITATION',
    'preciptype': 'PRECIPITATION TYPE',
    'tempmax': 'TEMPMAX',
    'tempmin': 'TEMPMIN',
})

# Normalize both join keys to plain datetime.date so equality matching works.
df['CRASH DATE'] = pd.to_datetime(df['CRASH DATE']).dt.date
df3['datetime'] = pd.to_datetime(df3['datetime']).dt.date

# Left join: every collision keeps its row even when no weather record
# exists for that date (weather columns become NaN). The redundant join
# key from the weather side is dropped afterwards.
merged_df = pd.merge(left=df, right=df3, how='left', left_on='CRASH DATE', right_on='datetime')
merged_df = merged_df.drop(columns=['datetime'])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|