import os
import json
from datasets import Dataset, DatasetDict, load_dataset, Features, Value, Image
# Define the paths to your dataset
image_root_dir = "./"
train_jsonl_file_path = "arabic_memes_categorization_train.jsonl"
dev_jsonl_file_path = "arabic_memes_categorization_dev.jsonl"
test_jsonl_file_path = "arabic_memes_categorization_test.jsonl"
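# Each line of the JSONL files is expected to be one JSON object matching the
# features defined below. An assumed (illustrative) record shape:
# {"id": "0001", "text": "...", "img_path": "images/0001.jpg"}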
# Define features for the dataset.
# Image() accepts a file path string and decodes it into a PIL image,
# so the 'image' column can be populated from the local image path.
features = Features({
    'id': Value('string'),
    'text': Value('string'),
    'image': Image(),
    'img_path': Value('string')
})
# Function to load each dataset split
def load_armeme_split(jsonl_file_path, image_root_dir):
    data = []
    # Load JSONL file (one JSON object per line)
    with open(jsonl_file_path, 'r', encoding='utf-8') as f:
        for line in f:
            item = json.loads(line)
            # Prepend the image root directory to the relative image path
            item['img_path'] = os.path.join(image_root_dir, item['img_path'])
            # If a record has no 'image' field, reuse the local path so the
            # Image() feature can decode the file (assumption about the JSONL schema)
            item.setdefault('image', item['img_path'])
            data.append(item)
    # Create a Hugging Face dataset from the list of records
    dataset = Dataset.from_list(data, features=features)
    return dataset
# Load each split
train_dataset = load_armeme_split(train_jsonl_file_path, image_root_dir)
dev_dataset = load_armeme_split(dev_jsonl_file_path, image_root_dir)
test_dataset = load_armeme_split(test_jsonl_file_path, image_root_dir)
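# Quick sanity check: split sizes depend on your local JSONL files.
print(len(train_dataset), len(dev_dataset), len(test_dataset))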
# Create a DatasetDict
dataset_dict = DatasetDict({
    'train': train_dataset,
    'dev': dev_dataset,
    'test': test_dataset
})
# Push the dataset to the Hugging Face Hub. Note: push_to_hub() has no
# `license` argument; set the license (CC-BY-NC-SA-4.0) in the dataset
# card metadata on the Hub instead.
dataset_dict.push_to_hub("QCRI/ArMeme")
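# Optional check (assumes the push succeeded and the repo is accessible):
# reload the dataset from the Hub and inspect one example.
reloaded = load_dataset("QCRI/ArMeme")
print(reloaded)
print(reloaded['train'][0]['text'])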