{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3ab2e823-50c9-40d4-9401-3ed7869da6e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "241ea0f7-02bf-4a3e-845c-e262b1d32031",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use specific revision for reproducibility!\n",
    "# See https://huggingface.co/datasets/avramandrei/histnero\n",
    "revision = \"433ca166efac28c952813c0e78bf301643cf5af3\"\n",
    "\n",
    "ds = load_dataset(\"avramandrei/histnero\", revision=revision)"
   ]
  },
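  {
   "cell_type": "markdown",
   "id": "5f0a1b2c-3d4e-4f5a-8b6c-7d8e9f0a1b2c",
   "metadata": {},
   "source": [
    "Before converting, it helps to inspect what was loaded. The next cell is an illustrative sketch: it prints the split sizes and the IOB label names that the export code below relies on (the `id`, `doc_id`, `region`, `tokens` and `ner_tags` columns)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a1b2c3d-4e5f-4a6b-9c7d-8e9f0a1b2c3d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show the available splits and their sizes\n",
    "print(ds)\n",
    "\n",
    "# Show the IOB label names behind the integer ner_tags ids\n",
    "print(ds[\"train\"].features[\"ner_tags\"].feature.names)"
   ]
  },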
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "66878e9e-83e8-4010-b81c-cefbc2ef0da7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# We are grouping together documents together first!\n",
    "def perform_document_grouping(dataset_split):\n",
    "    # Document identifier -> Training example\n",
    "    document_mapping = {}\n",
    "\n",
    "    for document in dataset_split:\n",
    "        doc_id = document[\"doc_id\"]\n",
    "        if doc_id in document_mapping:\n",
    "            document_mapping[doc_id].append(document)\n",
    "        else:\n",
    "            document_mapping[doc_id] = [document]\n",
    "    return document_mapping\n",
    "\n",
    "def export_to_conll(grouped_dataset_split, export_filename):\n",
    "    dataset_labels = ds[\"train\"].features[\"ner_tags\"].feature.names\n",
    "    dataset_label_id_to_string = {idx: label_string for idx, label_string in enumerate(dataset_labels)}\n",
    "\n",
    "    with open(export_filename, \"wt\") as f_out:\n",
    "        for document_name, training_examples in grouped_dataset_split.items():\n",
    "            f_out.write(\"-DOCSTART-\\tO\\n\\n\")\n",
    "\n",
    "            for training_example in training_examples:\n",
    "                tokens = training_example[\"tokens\"]\n",
    "                ner_label_ids = training_example[\"ner_tags\"]\n",
    "                ner_label_iobs = [dataset_label_id_to_string[ner_label_id] for ner_label_id in ner_label_ids]\n",
    "\n",
    "                assert len(tokens) == len(ner_label_iobs)\n",
    "\n",
    "                # Write some metadata first\n",
    "                metadata = [\n",
    "                    {\"id\": training_example[\"id\"]},\n",
    "                    {\"doc_id\": training_example[\"doc_id\"]},\n",
    "                    {\"region\": training_example[\"region\"]},\n",
    "                ]\n",
    "\n",
    "                for metadata_entry in metadata:\n",
    "                    for metadata_name, metadata_value in metadata_entry.items():\n",
    "                        f_out.write(f\"# histnero:{metadata_name} = {metadata_value}\\n\")\n",
    "                \n",
    "                for token, ner_label_iob in zip(tokens, ner_label_iobs):\n",
    "                    f_out.write(f\"{token}\\t{ner_label_iob}\\n\")\n",
    "\n",
    "                f_out.write(\"\\n\")"
   ]
  },
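  {
   "cell_type": "markdown",
   "id": "7b2c3d4e-5f6a-4b7c-8d9e-9f0a1b2c3d4e",
   "metadata": {},
   "source": [
    "A quick sanity check of the grouping helper (illustrative sketch): the mapping should contain one entry per distinct `doc_id`, and the per-document sentence counts should add up to the split size."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c3d4e5f-6a7b-4c8d-9e0f-0a1b2c3d4e5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "grouped_train = perform_document_grouping(ds[\"train\"])\n",
    "\n",
    "# One entry per document; sentence totals must match the split size\n",
    "total_sentences = sum(len(examples) for examples in grouped_train.values())\n",
    "assert total_sentences == len(ds[\"train\"])\n",
    "print(f\"{len(grouped_train)} documents, {total_sentences} sentences in the train split\")"
   ]
  },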
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "afb1dc77-1cde-43d5-9d9a-e7b458c08bb5",
   "metadata": {},
   "outputs": [],
   "source": [
    "for dataset_split in [\"train\", \"valid\", \"test\"]:\n",
    "    grouped_dataset = perform_document_grouping(ds[dataset_split])\n",
    "\n",
    "    split_filename = \"dev\" if dataset_split == \"valid\" else dataset_split\n",
    "    export_to_conll(grouped_dataset, f\"{split_filename}.tsv\")"
   ]
  }
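  ,
  {
   "cell_type": "markdown",
   "id": "9d4e5f6a-7b8c-4d9e-8f0a-1b2c3d4e5f6a",
   "metadata": {},
   "source": [
    "Finally, peek at the exported training file to verify the format (illustrative sketch, assuming `train.tsv` was written by the cell above): a `-DOCSTART-` line per document, `# histnero:...` metadata comments per sentence, and tab-separated token/label lines."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e5f6a7b-8c9d-4e0f-9a1b-2c3d4e5f6a7b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Print the first few lines of the exported training file\n",
    "with open(\"train.tsv\", \"rt\", encoding=\"utf-8\") as f_in:\n",
    "    for line in list(f_in)[:12]:\n",
    "        print(line.rstrip(\"\\n\"))"
   ]
  }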
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}