{ "cells": [ { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 7440/7440 [02:42<00:00, 45.75it/s] \n" ] } ], "source": [ "import json\n", "import os\n", "from tqdm import tqdm\n", "from pathlib import Path\n", "\n", "\n", "split = \"train\" # \"test\", \"eval\"\n", "\n", "def load_json_files(filepath):\n", " with open(filepath, 'r') as f:\n", " data = json.load(f)\n", " return data\n", "\n", "\n", "folders = [os.path.join('dataset', split, f) for f in os.listdir('dataset/'+ split )]\n", "data = []\n", "for folderpath in tqdm(folders):\n", " if not os.path.isdir(folderpath):\n", " continue\n", " files = [os.path.join(folderpath, f) for f in os.listdir(folderpath)]\n", " for filepath in files:\n", " if filepath.endswith('.json'):\n", " \n", " data.append({\"id\": str(Path(filepath).stem), **load_json_files(filepath)})\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 624022/624022 [00:11<00:00, 53122.61it/s]\n" ] } ], "source": [ "\n", "results = []\n", "for item in tqdm(data):\n", " # Focal class\n", " focal_class = \"class\" + \" \" + item[\"focal_class\"][\"identifier\"]\n", "\n", " if item[\"focal_class\"][\"superclass\"]:\n", " focal_class += \" \" + item[\"focal_class\"][\"superclass\"]\n", "\n", " if item[\"focal_class\"][\"interfaces\"]:\n", " focal_class += \" \" + item[\"focal_class\"][\"interfaces\"]\n", "\n", " focal_class += \" {\"\n", "\n", " indent = item[\"focal_method\"][\"body\"].split(\"\\n\")[-1][:-1]\n", "\n", " # Focal method\n", " focal_method = indent + item[\"focal_method\"][\"body\"]\n", "\n", " # Constructors\n", " constructors = []\n", " for method in item[\"focal_class\"][\"methods\"]:\n", " if method[\"constructor\"]:\n", " constructor = indent + method[\"full_signature\"] + \";\"\n", " constructors.append(constructor)\n", "\n", " # Methods\n", " methods = []\n", " for method in item[\"focal_class\"][\"methods\"]:\n", " if item[\"focal_method\"][\"full_signature\"] == method[\"full_signature\"]:\n", " continue\n", " #if method[\"testcase\"]:\n", " # continue\n", "\n", " if not method[\"constructor\"]:\n", " method_code = indent + method[\"full_signature\"] + \";\"\n", " methods.append(method_code)\n", "\n", " # Fields\n", " fields = []\n", " for field in item[\"focal_class\"][\"fields\"]:\n", " field_code = indent\n", " field_code += field[\"modifier\"] + \" \" + field[\"type\"] + \" \" + field[\"var_name\"] + \";\"\n", " fields.append(field_code)\n", "\n", "\n", " # TEST\n", " # Test class\n", " test_class = \"class\" + \" \" + item[\"test_class\"][\"identifier\"]\n", "\n", " if item[\"test_class\"][\"superclass\"]:\n", " test_class += \" \" + item[\"focal_class\"][\"superclass\"]\n", "\n", " if item[\"test_class\"][\"interfaces\"]:\n", " test_class += \" \" + item[\"focal_class\"][\"interfaces\"]\n", "\n", " test_class += \" {\"\n", "\n", " indent = item[\"test_case\"][\"body\"].split(\"\\n\")[-1][:-1]\n", " # Test case\n", " fields = []\n", " for field in item[\"test_class\"][\"fields\"]:\n", " field_code = indent\n", " field_code += field[\"modifier\"] + \" \" + field[\"type\"] + \" \" + field[\"var_name\"] + \";\"\n", " fields.append(field_code)\n", " test_case = indent + item[\"test_case\"][\"body\"]\n", "\n", " d = {\n", " 'id': item['id'],\n", " 't': test_case,\n", " 't_tc': \"\\n\\n\".join(filter(None, [test_class, \"\\n\".join(fields), 
test_case, \"}\"])),\n", " 'fm': focal_method,\n", " 'fm_fc': \"\\n\\n\".join(filter(None, [focal_class, focal_method, \"}\"])),\n", " 'fm_fc_c': \"\\n\\n\".join(filter(None, [focal_class, focal_method, \"\\n\".join(constructors), \"}\"])),\n", " 'fm_fc_c_m': \"\\n\\n\".join(filter(None, [focal_class, focal_method, \"\\n\".join(constructors), \"\\n\".join(methods), \"}\"])),\n", " 'fm_fc_c_m_f': \"\\n\\n\".join(filter(None, [focal_class, focal_method, \"\\n\".join(constructors), \"\\n\".join(methods), \"\\n\".join(fields), \"}\"])),\n", " }\n", " results.append(d)\n", "\n" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [], "source": [ "# sort by id\n", "results_sorted = sorted(results, key=lambda k: int(k['id']))" ] }, { "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [], "source": [ "from datasets import Dataset\n", "train_dataset = Dataset.from_list(results_sorted)" ] }, { "cell_type": "code", "execution_count": 28, "metadata": {}, "outputs": [], "source": [ "from datasets import DatasetDict\n", "\n", "dataset_dict = DatasetDict({'train': train_dataset, 'test': test_dataset, 'validation': eval_dataset})" ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 134.38ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 143.75ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 140.60ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 150.07ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 164.71ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 157.03ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 146.53ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 154.88ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 151.50ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 144.52ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 57/57 [00:00<00:00, 145.44ba/s]\n", "Uploading the dataset shards: 100%|██████████| 11/11 [01:26<00:00, 7.90s/it]\n", "Creating parquet from Arrow format: 100%|██████████| 40/40 [00:00<00:00, 155.49ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 40/40 [00:00<00:00, 138.47ba/s]\n", "Uploading the dataset shards: 100%|██████████| 2/2 [00:11<00:00, 5.58s/it]\n", "Creating parquet from Arrow format: 100%|██████████| 40/40 [00:00<00:00, 145.22ba/s]\n", "Creating parquet from Arrow format: 100%|██████████| 40/40 [00:00<00:00, 139.08ba/s]\n", "Uploading the dataset shards: 100%|██████████| 2/2 [00:12<00:00, 6.28s/it]\n", "README.md: 100%|██████████| 21.0/21.0 [00:00<00:00, 8.60kB/s]\n" ] } ], "source": [ "dataset_dict.push_to_hub('andstor/methods2test')" ] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.13" } }, "nbformat": 4, "nbformat_minor": 2 }