restructuring
describe.py → scripts/describe.py
RENAMED
@@ -1,7 +1,7 @@
 import logging
 import pandas as pd
 from pathlib import Path
-from utils import DataLoader, SCAPlotter, TextProcessor, TopicModeling, DATA_ANALYSIS_PATH
+from scripts.utils import DataLoader, SCAPlotter, TextProcessor, TopicModeling, DATA_ANALYSIS_PATH
 
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
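With describe.py and utils.py moved into a scripts/ package, the absolute import from scripts.utils only resolves when the repository root is on sys.path. A minimal sketch of what that implies for launching the script, assuming scripts/ sits directly under the repository root; the path handling below is illustrative and not part of the commit:

# Sketch only: assumes the post-restructuring layout repo_root/scripts/{describe.py, utils.py}.
# "python -m scripts.describe" run from the repository root puts the root on sys.path
# automatically; invoking the file directly puts scripts/ there instead, so the root
# has to be added by hand for "from scripts.utils import ..." to resolve.
import sys
from pathlib import Path

repo_root = Path(__file__).resolve().parents[1]
if str(repo_root) not in sys.path:
    sys.path.insert(0, str(repo_root))

from scripts.utils import DATA_ANALYSIS_PATH  # same import style as describe.py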
downloads.sh → scripts/downloads.sh
RENAMED
File without changes
extract.py → scripts/extract.py
RENAMED
@@ -2,18 +2,18 @@ import os
 import pandas as pd
 from pathlib import Path
 from tqdm.notebook import tqdm
-from utils import FileManager, PDFExtractor
+from scripts.utils import FileManager, PDFExtractor
 
-FileManager.unzip_data('../
+FileManager.unzip_data('../raw.zip', '..')
 
 directories = {
     "with_summaries": {
-        "path": Path('../
+        "path": Path('../raw/with_summaries'),
         "columns": ['id', 'type', 'year', 'main_judgement', 'media_summary'],
         "has_summary": True
     },
     "without_summaries": {
-        "path": Path('../
+        "path": Path('../raw/without_summaries'),
         "columns": ['id', 'type', 'year', 'main_judgement'],
         "has_summary": False
     }
@@ -22,6 +22,8 @@ directories = {
 for dir_key, dir_info in directories.items():
     data = []
     pdir = dir_info["path"]
+    output_dir = '../processed' / Path(dir_key)
+    output_dir.mkdir(parents=True, exist_ok=True)
 
     for root, dirs, files in tqdm(os.walk(pdir)):
         if not files:
@@ -43,4 +45,4 @@ for dir_key, dir_info in directories.items():
             continue
 
     df = pd.DataFrame(data, columns=dir_info["columns"])
-    df.to_csv(f'
+    df.to_csv(f'{output_dir}/judgments.tsv', sep='\t', index=False)
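A note on the added output lines: joining a plain string with a Path ('../processed' / Path(dir_key)) works because pathlib defines the reflected / operator, and mkdir(parents=True, exist_ok=True) creates the whole directory chain idempotently. A standalone sketch of that pattern, with dir_key taken from the diff and everything else illustrative:

from pathlib import Path

dir_key = "with_summaries"
output_dir = '../processed' / Path(dir_key)    # str / Path -> Path via Path.__rtruediv__
output_dir.mkdir(parents=True, exist_ok=True)  # creates ../processed/with_summaries if missing
print(output_dir)                              # ../processed/with_summaries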
requirements.txt → scripts/requirements.txt
RENAMED
File without changes
utils.py → scripts/utils.py
RENAMED
@@ -41,15 +41,15 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(
 
 HOME_DIR = Path("..")
 
-
-
-
-GLOVE_EMBEDDINGS_FILE = EXTRACTED_DATA_DIR / "glove.6B.100d.txt"
+RAW_DATA_DIR = HOME_DIR / "raw"
+PROCESSED_DATA_DIR = HOME_DIR / "processed"
+GLOVE_EMBEDDINGS_FILE = PROCESSED_DATA_DIR / "glove.6B.100d.txt"
 
 DATA_ANALYSIS_PATH = HOME_DIR / "data_analysis"
 FIGURES_DIR = DATA_ANALYSIS_PATH / "plots"
 
 FIGURES_DIR.mkdir(parents=True, exist_ok=True)
+PROCESSED_DATA_DIR.mkdir(parents=True, exist_ok=True)
 POST_TAGS = ['ADJ','ADP','ADV','CONJ','DET','NOUN','NUM','PRT','PRON','VERB','.','X']
 
 
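Because FIGURES_DIR.mkdir(...) and the new PROCESSED_DATA_DIR.mkdir(...) run at module level, simply importing scripts.utils creates the plots/ and processed/ directories as a side effect. A minimal usage sketch, assuming the scripts are launched so that the relative HOME_DIR = Path("..") resolves to the directory that holds raw/ and processed/:

# Sketch only: relies on the module-level mkdir calls shown in the diff above.
from scripts.utils import RAW_DATA_DIR, PROCESSED_DATA_DIR, GLOVE_EMBEDDINGS_FILE

print(RAW_DATA_DIR)           # ../raw (expected to exist after FileManager.unzip_data)
print(PROCESSED_DATA_DIR)     # ../processed, created on import if missing
print(GLOVE_EMBEDDINGS_FILE)  # ../processed/glove.6B.100d.txt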