{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import spacy\n", "import pandas as pd\n", "import glob\n", "import tqdm" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "17\n" ] } ], "source": [ "files = glob.glob(\"Database/*/CONLL/*.xlsx\")\n", "print(len(files))" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [], "source": [ "def group_acts(df):\n", " if \"ACT\" in df.columns:\n", " group_col = \"ACT\"\n", " elif \"Original_Act_ID\" in df.columns:\n", " group_col = \"Original_Act_ID\"\n", " else:\n", " \"unknown\"\n", "\n", " if \"LANG\" in df.columns:\n", " df = df[df[\"LANG\"] == \"LAT\"]\n", "\n", " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n", " \n", " # Combine words into sentences, assumed by unique 'Line_ID'\n", " grouped_data = df.groupby(group_col)\n", "\n", " return grouped_data\n", "\n", "def create_spacy_doc(df, nlp):\n", " grouped_df = group_acts(df)\n", " docs = []\n", " \n", " for key, group in grouped_df:\n", " tokens = []\n", " spaces = []\n", " entities = []\n", " current_entity = None\n", "\n", " # Iterate over the rows in the group\n", " for i, row in enumerate(group.itertuples()):\n", " word = row.Word_x\n", " length_word = len(word)\n", " # Check if there is a next word and whether it should be followed by a space\n", " space_after = not (i < len(group) - 1 and group.iloc[i + 1].Word_x in [',', '.', ';', ':'])\n", " tokens.append(word)\n", " spaces.append(space_after)\n", "\n", " # Handle entity recognition\n", " if row.PERS_x != 'O':\n", " entity_type = 'PERSON'\n", " elif row.LOC_x != 'O':\n", " entity_type = 'LOC'\n", " else:\n", " entity_type = None\n", "\n", " if current_entity is None and entity_type is not None:\n", " # Start new entity\n", " current_entity = [i, i, entity_type]\n", " elif current_entity is not None:\n", " if entity_type == current_entity[2]:\n", " # Extend current entity\n", " current_entity[1] = i\n", " else:\n", " # Finish current entity and add to entities list\n", " entities.append(current_entity)\n", " current_entity = [i, i, entity_type] if entity_type else None\n", "\n", " # Check if an entity is still open at the end of the group\n", " if current_entity is not None:\n", " entities.append(current_entity)\n", "\n", " # Create a spacy Doc object\n", " doc = spacy.tokens.Doc(nlp.vocab, words=tokens, spaces=spaces)\n", " # Get the sentencizer component from the pipeline\n", " sentencizer = nlp.get_pipe(\"sentencizer\")\n", "\n", " # Apply the sentencizer to the Doc\n", " sentencizer(doc)\n", " # Create Span objects for the entities\n", " spans = [doc.char_span(doc[ent[0]].idx, doc[ent[1]].idx + len(doc[ent[1]].text), label=ent[2])\n", " for ent in entities if doc.char_span(doc[ent[0]].idx, doc[ent[1]].idx + len(doc[ent[1]].text), label=ent[2])]\n", " doc.spans[\"sc\"] = spans\n", " \n", "\n", " docs.append(doc)\n", "\n", " return docs\n" ] }, { "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 0/17 [00:00= sent.start and span.end <= sent.end\n", " ]\n", " \n", " # Check if there are entities in the sentence, if yes, add to hf_docs_sents\n", " if sent_spans:\n", " hf_docs_sents.append({\n", " \"tokenized_text\": [token.text for token in sent],\n", " \"spans\": sent_spans,\n", " \"ms\": {} # Assuming 'ms' should be some metadata, define or update accordingly\n", " })\n", "hf_docs[0]" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "15141" ] }, "execution_count": 33, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(hf_docs_sents)" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [], "source": [ "import srsly" ] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [], "source": [ "srsly.write_jsonl(\"home-alcar-ner.jsonl\", hf_docs)" ] }, { "cell_type": "code", "execution_count": 39, "metadata": {}, "outputs": [], "source": [ "srsly.write_jsonl(\"home-alcar-ner-sents.jsonl\", hf_docs_sents)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "bow", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 2 }