import pandas as pd
from uszipcode import SearchEngine
from datasets import load_dataset
# dataset path to NYC_collisions_data JSON
dataset_path = "xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset"
data_files1 = ["df1_NYC_collisions_data.json"]
dataset1 = load_dataset(path=dataset_path, data_files=data_files1)
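# load_dataset returns a DatasetDict; with a plain list of data files, the
# records land in a single 'train' split by default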
# Convert to pandas DataFrame
df = dataset1['train'].to_pandas()
# Convert 'CRASH TIME' to datetime so the hour can be extracted below
df['CRASH TIME'] = pd.to_datetime(df['CRASH TIME'], format='%H:%M')
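# For example (hypothetical value): pd.to_datetime('14:25', format='%H:%M').hour == 14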
# Derive 'CRASH TIME PERIOD', 'CONTRIBUTING FACTOR VEHICLES', 'VEHICLE TYPES',
# 'STREET NAME' and 'STREET TYPE' for each row
for index, row in df.iterrows():
    # Bucket the crash hour into one of eight 3-hour periods
    hour = row['CRASH TIME'].hour
    period_start = (hour // 3) * 3
    period_end = period_start + 2
    df.at[index, 'CRASH TIME PERIOD'] = f"{period_start:02d}:00-{period_end:02d}:59"
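    # For example, hour 14 gives period_start = (14 // 3) * 3 = 12, i.e. '12:00-14:59'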
    # Collect non-empty contributing factors for vehicles 1-5 (pd.notna guards
    # against NaN entries, which are truthy and would break the string join)
    factors = [v for v in (row.get(f'CONTRIBUTING FACTOR VEHICLE {i}') for i in range(1, 6)) if pd.notna(v) and v]
    df.at[index, 'CONTRIBUTING FACTOR VEHICLES'] = ', '.join(factors)
    vehicle_types = [v for v in (row.get(f'VEHICLE TYPE CODE {i}') for i in range(1, 6)) if pd.notna(v) and v]
    df.at[index, 'VEHICLE TYPES'] = ', '.join(vehicle_types)
    street_names = []
    street_types = []
    # Check and append 'ON STREET NAME'
    if pd.notna(row['ON STREET NAME']) and row['ON STREET NAME'] != '':
        street_names.append(row['ON STREET NAME'])
        street_types.append('ON STREET')
    # Check and append 'CROSS STREET NAME'
    if pd.notna(row['CROSS STREET NAME']) and row['CROSS STREET NAME'] != '':
        street_names.append(row['CROSS STREET NAME'])
        street_types.append('CROSS STREET')
    # Check and append 'OFF STREET NAME'
    if pd.notna(row['OFF STREET NAME']) and row['OFF STREET NAME'] != '':
        street_names.append(row['OFF STREET NAME'])
        street_types.append('OFF STREET')
    # Join the names and types with a comma
    df.at[index, 'STREET NAME'] = ', '.join(street_names)
    df.at[index, 'STREET TYPE'] = ', '.join(street_types)
# Convert specific columns to numeric
numeric_columns = ['NUMBER OF PERSONS INJURED', 'NUMBER OF PEDESTRIANS INJURED', 'NUMBER OF CYCLIST INJURED', 'NUMBER OF MOTORIST INJURED',
                   'NUMBER OF PERSONS KILLED', 'NUMBER OF PEDESTRIANS KILLED', 'NUMBER OF CYCLIST KILLED', 'NUMBER OF MOTORIST KILLED']
for column in numeric_columns:
    df[column] = pd.to_numeric(df[column], errors='coerce').fillna(0).astype(int)
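# For example (hypothetical values): pd.to_numeric(pd.Series(['2', None, 'n/a']), errors='coerce')
# gives [2.0, NaN, NaN]; .fillna(0).astype(int) then gives [2, 0, 0]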
# add new columns 'NUMBER OF INJURIES' and 'NUMBER OF DEATHS'
df['NUMBER OF INJURIES'] = df['NUMBER OF PERSONS INJURED'] + df['NUMBER OF PEDESTRIANS INJURED'] + df['NUMBER OF CYCLIST INJURED'] + df['NUMBER OF MOTORIST INJURED']
df['NUMBER OF DEATHS'] = df['NUMBER OF PERSONS KILLED'] + df['NUMBER OF PEDESTRIANS KILLED'] + df['NUMBER OF CYCLIST KILLED'] + df['NUMBER OF MOTORIST KILLED']
# Keeping only the necessary columns
columns_to_keep = [
    'CRASH DATE', 'BOROUGH', 'ZIP CODE', 'LATITUDE', 'LONGITUDE', 'COLLISION_ID',
    'CRASH TIME PERIOD', 'CONTRIBUTING FACTOR VEHICLES', 'VEHICLE TYPES',
    'NUMBER OF INJURIES', 'NUMBER OF DEATHS', 'STREET NAME', 'STREET TYPE'
]
df = df[columns_to_keep]
# Create a SearchEngine object
search = SearchEngine()
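# A minimal lookup sketch (hypothetical coordinates near lower Manhattan); each
# result record exposes a .zipcode attribute:
# search.by_coordinates(lat=40.7128, lng=-74.0060, returns=1)[0].zipcode  # e.g. '10007'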
# Convert 'LATITUDE' and 'LONGITUDE' to floats
df['LATITUDE'] = pd.to_numeric(df['LATITUDE'], errors='coerce')
df['LONGITUDE'] = pd.to_numeric(df['LONGITUDE'], errors='coerce')
# fill in the missing 'ZIP CODE' if it has valid 'LATITUDE' and 'LONGITUDE'
for index, row in df.iterrows():
    # Fill 'ZIP CODE' when it is missing or empty and both coordinates are valid (non-null, non-zero)
    if (pd.isna(row['ZIP CODE']) or row['ZIP CODE'] == '') \
            and not (pd.isna(row['LATITUDE']) or row['LATITUDE'] == 0) \
            and not (pd.isna(row['LONGITUDE']) or row['LONGITUDE'] == 0):
        result = search.by_coordinates(lat=row['LATITUDE'], lng=row['LONGITUDE'], returns=1)
        if result:
            # Use the nearest zip code found
            df.at[index, 'ZIP CODE'] = result[0].zipcode
# dataset path to NYC_borough_data JSON
dataset_path = "xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset"
data_files2 = ["df2_NYC_borough_data.json"]
dataset2 = load_dataset(path=dataset_path, data_files=data_files2)
# Convert to pandas DataFrame
df2 = dataset2['train'].to_pandas()
# Convert the 'Borough' column to uppercase
df2['Borough'] = df2['Borough'].str.upper()
# Create a mapping dictionary from ZIP Code to Borough
zip_to_borough = df2.set_index('ZIP Code')['Borough'].to_dict()
# Function to update BOROUGH based on ZIP CODE
def update_borough(row):
    if pd.isna(row['BOROUGH']) or row['BOROUGH'] == '':
        return zip_to_borough.get(row['ZIP CODE'], row['BOROUGH'])
    else:
        return row['BOROUGH']
# Apply the function to each row in df
df['BOROUGH'] = df.apply(update_borough, axis=1)
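# For example (hypothetical entry): if zip_to_borough contains {'11201': 'BROOKLYN'},
# a row with an empty BOROUGH and ZIP CODE '11201' is filled with 'BROOKLYN';
# rows that already have a borough are left unchanged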
# dataset path to NYC_weather_data JSON
dataset_path = "xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset"
data_files3 = ["df3_NYC_weather_data.json"]
dataset3 = load_dataset(path=dataset_path, data_files=data_files3)
# Convert to pandas DataFrame
df3 = dataset3['train'].to_pandas()
# Keep only the specified columns
df3 = df3[['datetime', 'description', 'precip', 'preciptype', 'tempmax', 'tempmin']]
# Rename the columns
df3.rename(columns={
    'description': 'WEATHER DESCRIPTION',
    'precip': 'PRECIPITATION',
    'preciptype': 'PRECIPITATION TYPE',
    'tempmax': 'TEMPMAX',
    'tempmin': 'TEMPMIN'
}, inplace=True)
# Convert 'CRASH DATE' to datetime and remove the time component
df['CRASH DATE'] = pd.to_datetime(df['CRASH DATE']).dt.date
# Convert 'datetime' in df3 to datetime and remove the time component
df3['datetime'] = pd.to_datetime(df3['datetime']).dt.date
# Perform the merge
merged_df = pd.merge(left=df, right=df3, how='left', left_on='CRASH DATE', right_on='datetime')
# Drop the redundant 'datetime' join key from merged_df
merged_df.drop(columns=['datetime'], inplace=True)
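# Sanity check (assuming df3 holds one weather row per date): the left join should
# preserve the collision row count, leaving NaN weather columns for unmatched dates
# assert len(merged_df) == len(df)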
# print the first row of merged_df
# print(merged_df.iloc[0])
# Specify the path to the new JSON file
# new_file_path = '/Users/suzyxie/Desktop/hugging_face_data/NYC_collisions_weather_merged_data.json'
# Save the DataFrame to a JSON file
# merged_df.to_json(new_file_path, orient='records', lines=True)