misikoff committed on
Commit fc88e22
1 Parent(s): 8316188

fix: adjust processors to share more code
processors/days_on_market.ipynb CHANGED
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -10,6 +10,7 @@
    "import os\n",
    "\n",
    "from helpers import (\n",
+   "    get_data_path_for_config,\n",
    "    get_combined_df,\n",
    "    save_final_df_as_jsonl,\n",
    "    handle_slug_column_mappings,\n",
@@ -19,20 +20,16 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
-   "DATA_DIR = \"../data\"\n",
-   "PROCESSED_DIR = \"../processed/\"\n",
-   "FACET_DIR = \"days_on_market/\"\n",
-   "FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)\n",
-   "FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)"
+   "CONFIG_NAME = \"days_on_market\""
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [
     {
@@ -379,7 +376,7 @@
     "[586714 rows x 13 columns]"
    ]
   },
-  "execution_count": 8,
+  "execution_count": 6,
   "metadata": {},
   "output_type": "execute_result"
   }
@@ -403,15 +400,16 @@
    "    \"_perc_listings_price_cut_\": \"Percent Listings Price Cut\",\n",
    "}\n",
    "\n",
+   "data_dir_path = get_data_path_for_config(CONFIG_NAME)\n",
    "\n",
-   "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
+   "for filename in os.listdir(data_dir_path):\n",
    "    if filename.endswith(\".csv\"):\n",
    "        print(\"processing \" + filename)\n",
    "        # skip month files for now since they are redundant\n",
    "        if \"month\" in filename:\n",
    "            continue\n",
    "\n",
-   "        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))\n",
+   "        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))\n",
    "\n",
    "        cur_df[\"RegionName\"] = cur_df[\"RegionName\"].astype(str)\n",
    "        cur_df = set_home_type(cur_df, filename)\n",
@@ -439,7 +437,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [
     {
@@ -729,7 +727,7 @@
     "[586714 rows x 13 columns]"
    ]
   },
-  "execution_count": 9,
+  "execution_count": 7,
   "metadata": {},
   "output_type": "execute_result"
   }
@@ -753,11 +751,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 8,
    "metadata": {},
    "outputs": [],
    "source": [
-   "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
+   "save_final_df_as_jsonl(CONFIG_NAME, final_df)"
    ]
   }
  ],
processors/days_on_market.py CHANGED
@@ -1,13 +1,14 @@
 #!/usr/bin/env python
 # coding: utf-8

-# In[6]:
+# In[4]:


 import pandas as pd
 import os

 from helpers import (
+    get_data_path_for_config,
     get_combined_df,
     save_final_df_as_jsonl,
     handle_slug_column_mappings,
@@ -15,17 +16,13 @@ from helpers import (
 )


-# In[7]:
+# In[5]:


-DATA_DIR = "../data"
-PROCESSED_DIR = "../processed/"
-FACET_DIR = "days_on_market/"
-FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
-FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+CONFIG_NAME = "days_on_market"


-# In[8]:
+# In[6]:


 data_frames = []
@@ -46,15 +43,16 @@ slug_column_mappings = {
     "_perc_listings_price_cut_": "Percent Listings Price Cut",
 }

+data_dir_path = get_data_path_for_config(CONFIG_NAME)

-for filename in os.listdir(FULL_DATA_DIR_PATH):
+for filename in os.listdir(data_dir_path):
     if filename.endswith(".csv"):
         print("processing " + filename)
         # skip month files for now since they are redundant
         if "month" in filename:
             continue

-        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))

         cur_df["RegionName"] = cur_df["RegionName"].astype(str)
         cur_df = set_home_type(cur_df, filename)
@@ -80,7 +78,7 @@ combined_df = get_combined_df(
 combined_df


-# In[4]:
+# In[7]:


 # Adjust column names
@@ -99,8 +97,8 @@ final_df["Date"] = pd.to_datetime(final_df["Date"], format="%Y-%m-%d")
 final_df


-# In[5]:
+# In[8]:


-save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+save_final_df_as_jsonl(CONFIG_NAME, final_df)
processors/for_sale_listings.ipynb CHANGED
@@ -10,6 +10,7 @@
    "import os\n",
    "\n",
    "from helpers import (\n",
+   "    get_data_path_for_config,\n",
    "    get_combined_df,\n",
    "    save_final_df_as_jsonl,\n",
    "    handle_slug_column_mappings,\n",
@@ -23,11 +24,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "DATA_DIR = \"../data\"\n",
-   "PROCESSED_DIR = \"../processed/\"\n",
-   "FACET_DIR = \"for_sale_listings/\"\n",
-   "FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)\n",
-   "FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)"
+   "CONFIG_NAME = \"for_sale_listings\""
    ]
   },
   {
@@ -345,6 +342,8 @@
    }
   ],
   "source": [
+   "data_frames = []\n",
+   "\n",
    "exclude_columns = [\n",
    "    \"RegionID\",\n",
    "    \"SizeRank\",\n",
@@ -360,13 +359,12 @@
    "    \"new_pending\": \"New Pending\",\n",
    "}\n",
    "\n",
+   "data_dir_path = get_data_path_for_config(CONFIG_NAME)\n",
    "\n",
-   "data_frames = []\n",
-   "\n",
-   "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
+   "for filename in os.listdir(data_dir_path):\n",
    "    if filename.endswith(\".csv\"):\n",
    "        print(\"processing \" + filename)\n",
-   "        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))\n",
+   "        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))\n",
    "\n",
    "        # ignore monthly data for now since it is redundant\n",
    "        if \"month\" in filename:\n",
@@ -378,7 +376,6 @@
    "            data_frames, slug_column_mappings, exclude_columns, filename, cur_df\n",
    "        )\n",
    "\n",
-   "\n",
    "combined_df = get_combined_df(\n",
    "    data_frames,\n",
    "    [\n",
@@ -702,7 +699,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
+   "save_final_df_as_jsonl(CONFIG_NAME, final_df)"
    ]
   }
  ],
processors/for_sale_listings.py CHANGED
@@ -8,6 +8,7 @@ import pandas as pd
 import os

 from helpers import (
+    get_data_path_for_config,
     get_combined_df,
     save_final_df_as_jsonl,
     handle_slug_column_mappings,
@@ -18,16 +19,14 @@ from helpers import (
 # In[2]:


-DATA_DIR = "../data"
-PROCESSED_DIR = "../processed/"
-FACET_DIR = "for_sale_listings/"
-FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
-FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+CONFIG_NAME = "for_sale_listings"


 # In[3]:


+data_frames = []
+
 exclude_columns = [
     "RegionID",
     "SizeRank",
@@ -43,13 +42,12 @@ slug_column_mappings = {
     "new_pending": "New Pending",
 }

+data_dir_path = get_data_path_for_config(CONFIG_NAME)

-data_frames = []
-
-for filename in os.listdir(FULL_DATA_DIR_PATH):
+for filename in os.listdir(data_dir_path):
     if filename.endswith(".csv"):
         print("processing " + filename)
-        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))

         # ignore monthly data for now since it is redundant
         if "month" in filename:
@@ -61,7 +59,6 @@ for filename in os.listdir(FULL_DATA_DIR_PATH):
             data_frames, slug_column_mappings, exclude_columns, filename, cur_df
         )

-
 combined_df = get_combined_df(
     data_frames,
     [
@@ -100,5 +97,5 @@ final_df
 # In[5]:


-save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+save_final_df_as_jsonl(CONFIG_NAME, final_df)
processors/helpers.py CHANGED
@@ -2,6 +2,11 @@ import pandas as pd
 import os


+def get_data_path_for_config(config_name):
+    data_dir = "../data"
+    return os.path.join(data_dir, config_name)
+
+
 def coalesce_columns(
     df,
 ):
@@ -82,13 +87,15 @@ def get_melted_df(
     return df


-def save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df):
-    if not os.path.exists(FULL_PROCESSED_DIR_PATH):
-        os.makedirs(FULL_PROCESSED_DIR_PATH)
+def save_final_df_as_jsonl(config_name, df):
+    processed_dir = "../processed/"

-    final_df.to_json(
-        FULL_PROCESSED_DIR_PATH + "final.jsonl", orient="records", lines=True
-    )
+    if not os.path.exists(processed_dir):
+        os.makedirs(processed_dir)
+
+    full_path = os.path.join(processed_dir, config_name + ".jsonl")
+
+    df.to_json(full_path, orient="records", lines=True)


 def handle_slug_column_mappings(
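The helpers.py change above is the heart of the refactor: every processor now identifies itself with a single CONFIG_NAME string and delegates path handling to two shared functions. A minimal sketch of how those two helpers read after this commit, reconstructed from the diff (the other helpers in the file are unchanged):

import os


def get_data_path_for_config(config_name):
    # Raw CSVs for a facet live under ../data/<config_name>/.
    data_dir = "../data"
    return os.path.join(data_dir, config_name)


def save_final_df_as_jsonl(config_name, df):
    # Outputs now land in one flat ../processed/ directory,
    # one <config_name>.jsonl file per facet.
    processed_dir = "../processed/"

    if not os.path.exists(processed_dir):
        os.makedirs(processed_dir)

    full_path = os.path.join(processed_dir, config_name + ".jsonl")

    df.to_json(full_path, orient="records", lines=True)

Note the two behavior changes this carries: output files move from per-facet ../processed/<facet>/final.jsonl to a flat ../processed/<config_name>.jsonl, and the fragile string concatenation FULL_PROCESSED_DIR_PATH + "final.jsonl" is replaced with os.path.join.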
processors/home_values.ipynb CHANGED
@@ -10,6 +10,7 @@
    "import os\n",
    "\n",
    "from helpers import (\n",
+   "    get_data_path_for_config,\n",
    "    get_combined_df,\n",
    "    save_final_df_as_jsonl,\n",
    "    handle_slug_column_mappings,\n",
@@ -23,11 +24,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "DATA_DIR = \"../data\"\n",
-   "PROCESSED_DIR = \"../processed/\"\n",
-   "FACET_DIR = \"home_values/\"\n",
-   "FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)\n",
-   "FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)"
+   "CONFIG_NAME = \"home_values\""
    ]
   },
   {
@@ -375,10 +372,12 @@
    "    \"\": \"ZHVI\",\n",
    "}\n",
    "\n",
-   "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
+   "data_dir_path = get_data_path_for_config(CONFIG_NAME)\n",
+   "\n",
+   "for filename in os.listdir(data_dir_path):\n",
    "    if filename.endswith(\".csv\"):\n",
    "        print(\"processing \" + filename)\n",
-   "        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))\n",
+   "        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))\n",
    "        exclude_columns = [\n",
    "            \"RegionID\",\n",
    "            \"SizeRank\",\n",
@@ -1054,11 +1053,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
-   "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
+   "save_final_df_as_jsonl(CONFIG_NAME, final_df)"
    ]
   }
  ],
processors/home_values.py CHANGED
@@ -8,6 +8,7 @@ import pandas as pd
 import os

 from helpers import (
+    get_data_path_for_config,
     get_combined_df,
     save_final_df_as_jsonl,
     handle_slug_column_mappings,
@@ -18,14 +19,10 @@ from helpers import (
 # In[2]:


-DATA_DIR = "../data"
-PROCESSED_DIR = "../processed/"
-FACET_DIR = "home_values/"
-FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
-FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+CONFIG_NAME = "home_values"


-# In[5]:
+# In[3]:


 data_frames = []
@@ -37,10 +34,12 @@ slug_column_mappings = {
     "": "ZHVI",
 }

-for filename in os.listdir(FULL_DATA_DIR_PATH):
+data_dir_path = get_data_path_for_config(CONFIG_NAME)
+
+for filename in os.listdir(data_dir_path):
     if filename.endswith(".csv"):
         print("processing " + filename)
-        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))
         exclude_columns = [
             "RegionID",
             "SizeRank",
@@ -126,7 +125,7 @@ combined_df = get_combined_df(
 combined_df


-# In[11]:
+# In[4]:


 final_df = combined_df
@@ -152,7 +151,7 @@ for index, row in final_df.iterrows():
 final_df


-# In[12]:
+# In[5]:


 final_df = final_df.rename(
@@ -172,8 +171,8 @@ final_df["Date"] = pd.to_datetime(final_df["Date"], format="%Y-%m-%d")
 final_df


-# In[13]:
+# In[6]:


-save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+save_final_df_as_jsonl(CONFIG_NAME, final_df)
processors/home_values_forecasts.ipynb CHANGED
@@ -9,7 +9,7 @@
    "import pandas as pd\n",
    "import os\n",
    "\n",
-   "from helpers import get_combined_df, save_final_df_as_jsonl"
+   "from helpers import get_data_path_for_config, get_combined_df, save_final_df_as_jsonl"
    ]
   },
   {
@@ -18,11 +18,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "DATA_DIR = \"../data/\"\n",
-   "PROCESSED_DIR = \"../processed/\"\n",
-   "FACET_DIR = \"home_values_forecasts/\"\n",
-   "FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)\n",
-   "FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)"
+   "CONFIG_NAME = \"home_values_forecasts\""
    ]
   },
   {
@@ -384,10 +380,12 @@
   "source": [
    "data_frames = []\n",
    "\n",
-   "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
+   "data_dir_path = get_data_path_for_config(CONFIG_NAME)\n",
+   "\n",
+   "for filename in os.listdir(data_dir_path):\n",
    "    if filename.endswith(\".csv\"):\n",
    "        print(\"processing \" + filename)\n",
-   "        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))\n",
+   "        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))\n",
    "\n",
    "        cols = [\"Month Over Month %\", \"Quarter Over Quarter %\", \"Year Over Year %\"]\n",
    "        if filename.endswith(\"sm_sa_month.csv\"):\n",
@@ -786,11 +784,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
-   "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
+   "save_final_df_as_jsonl(CONFIG_NAME, final_df)"
    ]
   }
  ],
processors/home_values_forecasts.py CHANGED
@@ -7,17 +7,13 @@
 import pandas as pd
 import os

-from helpers import get_combined_df, save_final_df_as_jsonl
+from helpers import get_data_path_for_config, get_combined_df, save_final_df_as_jsonl


 # In[2]:


-DATA_DIR = "../data/"
-PROCESSED_DIR = "../processed/"
-FACET_DIR = "home_values_forecasts/"
-FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
-FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+CONFIG_NAME = "home_values_forecasts"


 # In[3]:
@@ -25,10 +21,12 @@ FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)

 data_frames = []

-for filename in os.listdir(FULL_DATA_DIR_PATH):
+data_dir_path = get_data_path_for_config(CONFIG_NAME)
+
+for filename in os.listdir(data_dir_path):
     if filename.endswith(".csv"):
         print("processing " + filename)
-        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))

         cols = ["Month Over Month %", "Quarter Over Quarter %", "Year Over Year %"]
         if filename.endswith("sm_sa_month.csv"):
@@ -59,7 +57,7 @@ combined_df = get_combined_df(
 combined_df


-# In[1]:
+# In[4]:


 # Adjust columns
@@ -93,8 +91,8 @@ final_df["Date"] = pd.to_datetime(final_df["Date"], format="%Y-%m-%d")
 final_df


-# In[9]:
+# In[5]:


-save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+save_final_df_as_jsonl(CONFIG_NAME, final_df)
processors/new_construction.ipynb CHANGED
@@ -10,6 +10,7 @@
    "import os\n",
    "\n",
    "from helpers import (\n",
+   "    get_data_path_for_config,\n",
    "    get_combined_df,\n",
    "    save_final_df_as_jsonl,\n",
    "    handle_slug_column_mappings,\n",
@@ -23,11 +24,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "DATA_DIR = \"../data\"\n",
-   "PROCESSED_DIR = \"../processed/\"\n",
-   "FACET_DIR = \"new_construction/\"\n",
-   "FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)\n",
-   "FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)"
+   "CONFIG_NAME = \"new_construction\""
    ]
   },
   {
@@ -268,6 +265,8 @@
    }
   ],
   "source": [
+   "data_frames = []\n",
+   "\n",
    "exclude_columns = [\n",
    "    \"RegionID\",\n",
    "    \"SizeRank\",\n",
@@ -283,12 +282,12 @@
    "    \"sales_count\": \"Sales Count\",\n",
    "}\n",
    "\n",
-   "data_frames = []\n",
+   "data_dir_path = get_data_path_for_config(CONFIG_NAME)\n",
    "\n",
-   "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
+   "for filename in os.listdir(data_dir_path):\n",
    "    if filename.endswith(\".csv\"):\n",
    "        print(\"processing \" + filename)\n",
-   "        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))\n",
+   "        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))\n",
    "\n",
    "        cur_df = set_home_type(cur_df, filename)\n",
    "\n",
@@ -558,7 +557,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
+   "save_final_df_as_jsonl(CONFIG_NAME, final_df)"
    ]
   }
  ],
processors/new_construction.py CHANGED
@@ -8,6 +8,7 @@ import pandas as pd
 import os

 from helpers import (
+    get_data_path_for_config,
     get_combined_df,
     save_final_df_as_jsonl,
     handle_slug_column_mappings,
@@ -18,16 +19,14 @@ from helpers import (
 # In[2]:


-DATA_DIR = "../data"
-PROCESSED_DIR = "../processed/"
-FACET_DIR = "new_construction/"
-FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
-FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+CONFIG_NAME = "new_construction"


 # In[3]:


+data_frames = []
+
 exclude_columns = [
     "RegionID",
     "SizeRank",
@@ -43,12 +42,12 @@ slug_column_mappings = {
     "sales_count": "Sales Count",
 }

-data_frames = []
+data_dir_path = get_data_path_for_config(CONFIG_NAME)

-for filename in os.listdir(FULL_DATA_DIR_PATH):
+for filename in os.listdir(data_dir_path):
     if filename.endswith(".csv"):
         print("processing " + filename)
-        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))

         cur_df = set_home_type(cur_df, filename)

@@ -95,5 +94,5 @@ final_df.sort_values(by=["Region ID", "Home Type", "Date"])
 # In[5]:


-save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+save_final_df_as_jsonl(CONFIG_NAME, final_df)
processors/rentals.ipynb CHANGED
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -10,6 +10,7 @@
    "import os\n",
    "\n",
    "from helpers import (\n",
+   "    get_data_path_for_config,\n",
    "    get_combined_df,\n",
    "    save_final_df_as_jsonl,\n",
    "    handle_slug_column_mappings,\n",
@@ -19,347 +20,29 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
-   "DATA_DIR = \"../data\"\n",
-   "PROCESSED_DIR = \"../processed/\"\n",
-   "FACET_DIR = \"rentals/\"\n",
-   "FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)\n",
-   "FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)"
+   "CONFIG_NAME = \"rentals\""
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 3,
    "metadata": {},
-   "outputs": [
-   ... (deleted output cell elided: rendered DataFrame preview, "1258740 rows × 15 columns" — RegionID, SizeRank, RegionName, RegionType, StateName, Home Type, State, Metro, StateCodeFIPS, MunicipalCodeFIPS, Date, Rent (Smoothed), CountyName, Rent (Smoothed) (Seasonally Adjusted), City) ...
-   ],
+   "outputs": [],
   "source": [
    "data_frames = []\n",
    "\n",
    "slug_column_mappings = {\"\": \"Rent\"}\n",
    "\n",
-   "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
+   "data_dir_path = get_data_path_for_config(CONFIG_NAME)\n",
+   "\n",
+   "for filename in os.listdir(data_dir_path):\n",
    "    if filename.endswith(\".csv\"):\n",
-   "        # print(\"processing \" + filename)\n",
-   "        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))\n",
+   "        print(\"processing \" + filename)\n",
+   "        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))\n",
    "        exclude_columns = [\n",
    "            \"RegionID\",\n",
    "            \"SizeRank\",\n",
@@ -1095,7 +778,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
+   "save_final_df_as_jsonl(CONFIG_NAME, final_df)"
    ]
   }
  ],
processors/rentals.py CHANGED
@@ -8,6 +8,7 @@ import pandas as pd
 import os

 from helpers import (
+    get_data_path_for_config,
     get_combined_df,
     save_final_df_as_jsonl,
     handle_slug_column_mappings,
@@ -15,27 +16,25 @@ from helpers import (
 )


-# In[3]:
+# In[1]:


-DATA_DIR = "../data"
-PROCESSED_DIR = "../processed/"
-FACET_DIR = "rentals/"
-FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
-FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+CONFIG_NAME = "rentals"


-# In[7]:
+# In[3]:


 data_frames = []

 slug_column_mappings = {"": "Rent"}

-for filename in os.listdir(FULL_DATA_DIR_PATH):
+data_dir_path = get_data_path_for_config(CONFIG_NAME)
+
+for filename in os.listdir(data_dir_path):
     if filename.endswith(".csv"):
-        # print("processing " + filename)
-        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+        print("processing " + filename)
+        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))
         exclude_columns = [
             "RegionID",
             "SizeRank",
@@ -112,7 +111,7 @@ combined_df = get_combined_df(
 combined_df


-# In[8]:
+# In[4]:


 final_df = combined_df
@@ -131,7 +130,7 @@ final_df = final_df.drop(columns=["StateName", "CountyName"])
 final_df


-# In[6]:
+# In[5]:


 # Adjust column names
@@ -154,5 +153,5 @@ final_df
 # In[7]:


-save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+save_final_df_as_jsonl(CONFIG_NAME, final_df)
processors/sales.ipynb CHANGED
@@ -10,6 +10,7 @@
    "import os\n",
    "\n",
    "from helpers import (\n",
+   "    get_data_path_for_config,\n",
    "    get_combined_df,\n",
    "    save_final_df_as_jsonl,\n",
    "    handle_slug_column_mappings,\n",
@@ -23,11 +24,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "DATA_DIR = \"../data\"\n",
-   "PROCESSED_DIR = \"../processed/\"\n",
-   "FACET_DIR = \"sales/\"\n",
-   "FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)\n",
-   "FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)"
+   "CONFIG_NAME = \"sales\""
    ]
   },
   {
@@ -448,6 +445,8 @@
    }
   ],
   "source": [
+   "data_frames = []\n",
+   "\n",
    "exclude_columns = [\n",
    "    \"RegionID\",\n",
    "    \"SizeRank\",\n",
@@ -466,16 +465,16 @@
    "    \"_sales_count_now_\": \"Nowcast\",\n",
    "}\n",
    "\n",
-   "data_frames = []\n",
+   "data_dir_path = get_data_path_for_config(CONFIG_NAME)\n",
    "\n",
-   "for filename in os.listdir(FULL_DATA_DIR_PATH):\n",
+   "for filename in os.listdir(data_dir_path):\n",
    "    if filename.endswith(\".csv\"):\n",
    "        print(\"processing \" + filename)\n",
    "        # ignore monthly data for now since it is redundant\n",
    "        if \"month\" in filename:\n",
    "            continue\n",
    "\n",
-   "        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))\n",
+   "        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))\n",
    "\n",
    "        cur_df = set_home_type(cur_df, filename)\n",
    "\n",
@@ -1294,7 +1293,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)"
+   "save_final_df_as_jsonl(CONFIG_NAME, final_df)"
    ]
   }
  ],
processors/sales.py CHANGED
@@ -1,13 +1,14 @@
 #!/usr/bin/env python
 # coding: utf-8

-# In[2]:
+# In[1]:


 import pandas as pd
 import os

 from helpers import (
+    get_data_path_for_config,
     get_combined_df,
     save_final_df_as_jsonl,
     handle_slug_column_mappings,
@@ -15,19 +16,17 @@ from helpers import (
 )


-# In[3]:
+# In[2]:


-DATA_DIR = "../data"
-PROCESSED_DIR = "../processed/"
-FACET_DIR = "sales/"
-FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
-FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+CONFIG_NAME = "sales"


-# In[4]:
+# In[3]:


+data_frames = []
+
 exclude_columns = [
     "RegionID",
     "SizeRank",
@@ -46,16 +45,16 @@ slug_column_mappings = {
     "_sales_count_now_": "Nowcast",
 }

-data_frames = []
+data_dir_path = get_data_path_for_config(CONFIG_NAME)

-for filename in os.listdir(FULL_DATA_DIR_PATH):
+for filename in os.listdir(data_dir_path):
     if filename.endswith(".csv"):
         print("processing " + filename)
         # ignore monthly data for now since it is redundant
         if "month" in filename:
             continue

-        cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))

         cur_df = set_home_type(cur_df, filename)

@@ -80,7 +79,7 @@ combined_df = get_combined_df(
 combined_df


-# In[52]:
+# In[4]:


 # Adjust column names
@@ -98,7 +97,7 @@ final_df["Date"] = pd.to_datetime(final_df["Date"])
 final_df.sort_values(by=["Region ID", "Home Type", "Date"])


-# In[53]:
+# In[5]:


 final_df["Date"] = pd.to_datetime(final_df["Date"], format="%Y-%m-%d")
@@ -106,8 +105,8 @@ final_df["Date"] = pd.to_datetime(final_df["Date"], format="%Y-%m-%d")
 final_df


-# In[54]:
+# In[6]:


-save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+save_final_df_as_jsonl(CONFIG_NAME, final_df)
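After this commit every processor follows the same skeleton, differing only in its CONFIG_NAME, column mappings, and facet-specific cleanup. A condensed sketch assembled from the diffs above (not a verbatim file): the return value of handle_slug_column_mappings and the second argument of get_combined_df are only partially visible in the diff, so those call sites are assumptions, marked as such in the comments.

# Condensed per-processor skeleton after the refactor (a sketch, using
# days_on_market's values as the example; mappings vary per facet).
import os

import pandas as pd

from helpers import (
    get_data_path_for_config,
    get_combined_df,
    save_final_df_as_jsonl,
    handle_slug_column_mappings,
    set_home_type,
)

CONFIG_NAME = "days_on_market"  # the only per-facet path constant left

data_frames = []
exclude_columns = ["RegionID", "SizeRank"]  # abbreviated here
slug_column_mappings = {"_perc_listings_price_cut_": "Percent Listings Price Cut"}

data_dir_path = get_data_path_for_config(CONFIG_NAME)

for filename in os.listdir(data_dir_path):
    if filename.endswith(".csv"):
        print("processing " + filename)
        # skip month files for now since they are redundant
        if "month" in filename:
            continue

        cur_df = pd.read_csv(os.path.join(data_dir_path, filename))
        cur_df = set_home_type(cur_df, filename)
        # assumed: handle_slug_column_mappings returns the updated frame list
        data_frames = handle_slug_column_mappings(
            data_frames, slug_column_mappings, exclude_columns, filename, cur_df
        )

# assumed: the second argument names the columns the frames are combined on
combined_df = get_combined_df(data_frames, ["RegionID"])

final_df = combined_df  # per-facet renames/cleanup happen here
save_final_df_as_jsonl(CONFIG_NAME, final_df)

With path handling centralized in helpers.py, changing the data or output layout now means editing one file instead of seven processors.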