Datasets:
JSON ERROR in loading files of v1_6-sample using load_dataset
#22
by
sakurapeng
- opened
Error:
raise JSONDecodeError("Extra data", s, end)
json.decoder.JSONDecodeError: Extra data: line 2 column 1 (char 1469)
File: v1_5r2_sample-0097.json.gz (there is no problem loading the previous files)
Test code (no problem loading the file directly using gzip):
import gzip
import json
from datasets import load_dataset
def read_gz_file(path):
    """Yield one decoded JSON object per line of a gzipped JSON Lines file."""
    with gzip.open(path, 'rt') as handle:
        yield from map(json.loads, handle)
def load(path, data_file):
    """Stream the dataset with datasets.load_dataset and print its first example."""
    ds = load_dataset(path, data_files=data_file, split="train", streaming=True)
    for idx, sample in enumerate(ds):
        print(idx)
        print(sample)
        break
def main():
    # Reading the file directly with gzip works fine; the JSONDecodeError
    # only appears when the same file goes through datasets.load_dataset.
    dataset_dir = "/data/dolma/v1_6-sample"
    files = ["v1_5r2_sample-0097.json.gz"]
    load(dataset_dir, files)


if __name__ == "__main__":
    main()
Certain rows within this file have inconsistent value types: in some rows the "created" key holds an integer value, while in others it stores a string.
You can use the following code to revise "v1_5r2_sample-0097.json.gz":
import gzip
import json
def read_gz_file(path):
    """
    Lazily read a gzipped JSON Lines file, yielding one parsed object
    per input line.
    """
    with gzip.open(path, 'rt') as stream:
        for raw_line in stream:
            yield json.loads(raw_line)
def check_data_types(data):
    """
    Report rows whose 'created' value has a different type from the one
    observed in the first row (the first row's type is the baseline).

    Prints one message per mismatching row; returns nothing.
    """
    expected_type = None
    for i, item in enumerate(data):
        current_type = type(item.get("created"))
        # First row establishes the baseline type.
        if expected_type is None:
            expected_type = current_type
            continue
        if current_type == expected_type:
            continue
        print(f"Inconsistency in type: At line {i+1}, 'created' field type changed from {expected_type} to {current_type}.")
def replace_missing_with_zero(data):
    """
    Replace missing/None 'created' values with 0.

    Accepts any iterable of dicts — including the generator produced by
    read_gz_file. The rows are materialized into a list so the caller can
    iterate the result; the previous version returned the input object
    itself, which, when *data* was a generator, had already been exhausted
    by this function and therefore yielded nothing to the subsequent write.

    Args:
        data: iterable of row dicts.

    Returns:
        list[dict]: the rows (mutated in place) with 'created' never None.
    """
    rows = []
    for item in data:
        if item.get('created') is None:
            item['created'] = 0
        rows.append(item)
    return rows
def write_to_gz_file(data, path):
    """
    Write *data* as JSON Lines to *path*, one object per line.

    The previous version always wrote plain text even though its docstring
    promised gzip compression — which is why the driver's follow-up read of
    the ".json.gz" path failed. Output is now gzip-compressed when *path*
    ends with ".gz"; otherwise plain UTF-8 text is written, preserving the
    old behavior for ".json" paths.

    Args:
        data: iterable of JSON-serializable objects.
        path: destination file path; a ".gz" suffix selects gzip output.
    """
    if path.endswith('.gz'):
        sink = gzip.open(path, 'wt', encoding='utf-8')
    else:
        sink = open(path, 'w', encoding='utf-8')
    with sink as f:
        for item in data:
            f.write(json.dumps(item) + '\n')
if __name__ == "__main__":
    # Source file that triggers the JSONDecodeError in load_dataset.
    path = "./data/dolma_data/v1_6-sample/v1_5r2_sample-0097.json.gz"
    data = read_gz_file(path)

    # Rows are written here (uncompressed) before being re-gzipped.
    temp_output_path = "./data/dolma_data/v1_6-sample/v1_5r2_sample-0097_v2_processed.json"

    # Normalize the inconsistent 'created' values, then persist the rows.
    processed_data = replace_missing_with_zero(data)
    write_to_gz_file(processed_data, temp_output_path)

    # Compress temp_output_path with gzip before this step.
    new_path = "./data/dolma_data/v1_6-sample/v1_5r2_sample-0097_v2_processed.json.gz"
    data = read_gz_file(new_path)

    # Re-scan the processed file, surfacing parse errors explicitly.
    try:
        check_data_types(data)
    except json.JSONDecodeError as e:
        print(f"JSON parsing error: {e}")
    except Exception as e:
        print(f"Error processing data: {e}")
sakurapeng
changed discussion status to
closed