import pandas as pd
import os

from helpers import (
    get_combined_df,
    save_final_df_as_jsonl,
    handle_slug_column_mappings,
    set_home_type,
)

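# Source and output locations for the new-construction facet.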
DATA_DIR = "../data"
PROCESSED_DIR = "../processed/"
FACET_DIR = "new_construction/"
FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)

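# Region identifier columns; these are excluded when each file's value columns are mapped below.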
exclude_columns = [
    "RegionID",
    "SizeRank",
    "RegionName",
    "RegionType",
    "StateName",
    "Home Type",
]

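# Maps the slug in each CSV filename to the column name used for its values.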
slug_column_mappings = {
    "_median_sale_price_per_sqft": "Median Sale Price per Sqft",
    "_median_sale_price": "Median Sale Price",
    "sales_count": "Sales Count",
}

data_frames = []

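# Read each CSV in the data directory, tag it with its home type, and
# map its value columns according to slug_column_mappings.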
for filename in os.listdir(FULL_DATA_DIR_PATH):
    if filename.endswith(".csv"):
        print("processing " + filename)
        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))

        cur_df = set_home_type(cur_df, filename)

        data_frames = handle_slug_column_mappings(
            data_frames, slug_column_mappings, exclude_columns, filename, cur_df
        )

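# Merge the per-file frames on the shared region and date identifier columns.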
combined_df = get_combined_df(
    data_frames,
    [
        "RegionID",
        "SizeRank",
        "RegionName",
        "RegionType",
        "StateName",
        "Home Type",
        "Date",
    ],
)

combined_df  # preview of the merged frame (only displays in an interactive session)

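# Rename the raw column headers to their final display names.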
final_df = combined_df
final_df = final_df.rename(
    columns={
        "RegionID": "Region ID",
        "SizeRank": "Size Rank",
        "RegionName": "Region",
        "RegionType": "Region Type",
        "StateName": "State",
    }
)

final_df["Date"] = pd.to_datetime(final_df["Date"], format="%Y-%m-%d")

# sort_values returns a new frame; assign it back so the ordering is preserved.
final_df = final_df.sort_values(by=["Region ID", "Home Type", "Date"])

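# Write the processed data frame to the processed directory as JSONL.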
save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)