raphael0202 committed
Commit 3325433
1 Parent(s): 5527739

Upload generate_rnm_ds.py
generate_rnm_ds.py ADDED (+80 -0)
@@ -0,0 +1,80 @@
import csv
import tempfile
import zipfile
from pathlib import Path
from typing import Iterator

import duckdb
import requests
import tqdm

DUCKDB_SQL = (
    """COPY (SELECT * FROM read_csv('%s', delim=';', AUTO_DETECT=TRUE)) TO '%s';"""
)


def csv_iter(url: str, strip_header: bool = False) -> Iterator[list[str]]:
    """Download the France AgriMer RNM ZIP archive at `url` and yield the
    rows of the CSV file it contains."""
    r = requests.get(url, stream=True)

    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_zip_path = Path(tmp_dir) / "tmp.zip"

        # Copy the request content (a ZIP file) to a temporary file
        with tmp_zip_path.open("wb") as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)

        # Extract the name of the file we're interested in, there should be
        # only one file in the ZIP archive
        zip_file = zipfile.ZipFile(tmp_zip_path)
        zip_file_info = zip_file.infolist()

        if len(zip_file_info) != 1:
            raise ValueError("Expecting one file in zip")

        filename = zip_file_info[0].filename
        tmp_path = Path(tmp_dir) / filename

        # Extract the file to a temporary location
        with zip_file.open(filename, "r") as f_in:
            with tmp_path.open("wb") as f_out:
                while True:
                    chunk = f_in.read(1024)
                    if chunk == b"":
                        break
                    f_out.write(chunk)

        # The text is in ISO-8859-1 encoding
        # We strip spaces before/after all columns
        with open(tmp_path, "rt", encoding="ISO-8859-1", newline="") as f_in:
            csv_reader = csv.reader(f_in, delimiter=";")
            for i, row in enumerate(csv_reader):
                if strip_header and i == 0:
                    continue
                yield [col.strip() for col in row]


def generate_csv(output_path: Path, start_year: int, end_year: int) -> None:
    with open(output_path, "wt", newline="") as f_out:
        csv_writer = csv.writer(f_out, delimiter=";")
        for i in tqdm.tqdm(range(start_year, end_year + 1), desc="years"):
            url_template = f"https://visionet.franceagrimer.fr/Pages/OpenDocument.aspx?fileurl=Statistiques%2fmulti-filieres%2fcotations%20des%20produits%20frais%2fCOT-MUL-prd_RNM-A{i:02d}.zip&telechargersanscomptage=oui"

            for item in tqdm.tqdm(
                csv_iter(url_template, strip_header=(i != start_year)), desc="rows"
            ):
                csv_writer.writerow(item)


def export_parquet(input_path: Path, output_path: Path) -> None:
    conn = duckdb.connect(":memory:")
    conn.execute(DUCKDB_SQL % (input_path, output_path))


OUTPUT_CSV_PATH = Path("COT-MUL-prd_RNM.csv")
OUTPUT_PARQUET_PATH = Path("COT-MUL-prd_RNM.parquet")
generate_csv(OUTPUT_CSV_PATH, start_year=0, end_year=24)
export_parquet(OUTPUT_CSV_PATH, OUTPUT_PARQUET_PATH)
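
As a quick sanity check (not part of the uploaded script), the exported Parquet file can be inspected with DuckDB. This is a minimal sketch: it only assumes the output file name used above, and fetchdf() requires pandas to be installed; the actual column names come from whatever headers the RNM CSV export carries.

# Sketch: inspect the generated Parquet file after the script has run.
import duckdb

conn = duckdb.connect(":memory:")
# Row count of the exported dataset
print(conn.execute("SELECT COUNT(*) FROM read_parquet('COT-MUL-prd_RNM.parquet')").fetchone())
# Preview the first few records as a DataFrame
print(conn.execute("SELECT * FROM read_parquet('COT-MUL-prd_RNM.parquet') LIMIT 5").fetchdf())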