{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "2FzBzmpBRkV3"
},
"source": [
"# Checking Embeddings of Terms (Noun/Verb/Adj/etc.) from Tagged Wordnet Gloss\n",
"\n",
"I discovered there's a more actively maintained fork of WordNet (Open English WordNet) and moved this analysis over to it."
]
},
{
"cell_type": "code",
"source": [
"%pip install -q datasets"
],
"metadata": {
"id": "K5C1kaWhXnJf",
"outputId": "5b4045f0-9aa2-4579-d52c-4f45e1d67180",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Requirement already satisfied: datasets in /usr/local/lib/python3.10/dist-packages (2.18.0)\n",
"Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from datasets) (3.13.1)\n",
"Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from datasets) (1.25.2)\n",
"Requirement already satisfied: pyarrow>=12.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (14.0.2)\n",
"Requirement already satisfied: pyarrow-hotfix in /usr/local/lib/python3.10/dist-packages (from datasets) (0.6)\n",
"Requirement already satisfied: dill<0.3.9,>=0.3.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (0.3.8)\n",
"Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets) (1.5.3)\n",
"Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (2.31.0)\n",
"Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from datasets) (4.66.2)\n",
"Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets) (3.4.1)\n",
"Requirement already satisfied: multiprocess in /usr/local/lib/python3.10/dist-packages (from datasets) (0.70.16)\n",
"Requirement already satisfied: fsspec[http]<=2024.2.0,>=2023.1.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (2023.6.0)\n",
"Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets) (3.9.3)\n",
"Requirement already satisfied: huggingface-hub>=0.19.4 in /usr/local/lib/python3.10/dist-packages (from datasets) (0.20.3)\n",
"Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from datasets) (24.0)\n",
"Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from datasets) (6.0.1)\n",
"Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.3.1)\n",
"Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (23.2.0)\n",
"Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.4.1)\n",
"Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (6.0.5)\n",
"Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.9.4)\n",
"Requirement already satisfied: async-timeout<5.0,>=4.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (4.0.3)\n",
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.19.4->datasets) (4.10.0)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (3.6)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (2.0.7)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (2024.2.2)\n",
"Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets) (2.8.2)\n",
"Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets) (2023.4)\n",
"Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.1->pandas->datasets) (1.16.0)\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"from datasets import load_dataset\n",
"\n",
"# Load the dataset\n",
"dataset = load_dataset(\"jon-tow/open-english-wordnet-synset-2023\")"
],
"metadata": {
"id": "n12stD5MRnek"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "_D-Y5nf6RkV4",
"outputId": "a205d054-7fab-477d-eddb-9be56942891c",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"{'@id': 'oewn-03159292-a',\n",
" '@ili': 'i18097',\n",
" '@members': 'oewn-avenged-a',\n",
" '@partOfSpeech': 'a',\n",
" '@lexfile': 'adj.ppl',\n",
" 'Definition': 'for which vengeance has been taken',\n",
" 'SynsetRelation': [],\n",
" 'Example': 'an avenged injury',\n",
" 'ILIDefinition': None,\n",
" '@dc:source': None}"
]
},
"metadata": {},
"execution_count": 40
}
],
"source": [
"dataset['train'][0]"
]
},
{
"cell_type": "code",
"source": [
"import pandas as pd"
],
"metadata": {
"id": "ioCtYnx7gDo6"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"df = pd.DataFrame(dataset['train'])"
],
"metadata": {
"id": "g6voyMIugE4c"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"df.head()"
],
"metadata": {
"id": "WNmdjublgIXz",
"outputId": "90ff3c7f-7ac6-4f59-df79-c96b6a5f75ad",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 206
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" @id @ili @members @partOfSpeech \\\n",
"0 oewn-03159292-a i18097 oewn-avenged-a a \n",
"1 oewn-03159419-a i18098 oewn-unavenged-a a \n",
"2 oewn-03159554-a i18099 oewn-beaten-a a \n",
"3 oewn-03159654-a i18100 oewn-calibrated-a oewn-graduated-a a \n",
"4 oewn-03159804-a i18101 oewn-cantering-a a \n",
"\n",
" @lexfile Definition SynsetRelation \\\n",
"0 adj.ppl for which vengeance has been taken [] \n",
"1 adj.ppl for which vengeance has not been taken [] \n",
"2 adj.ppl formed or made thin by hammering [] \n",
"3 adj.ppl marked with or divided into degrees [] \n",
"4 adj.ppl riding at a gait between a trot and a gallop [] \n",
"\n",
" Example ILIDefinition @dc:source \n",
"0 an avenged injury None None \n",
"1 an unavenged murder None None \n",
"2 beaten gold None None \n",
"3 a calibrated thermometer None None \n",
"4 the cantering soldiers None None "
],
"text/html": [
"\n",
"
\n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" @id | \n",
" @ili | \n",
" @members | \n",
" @partOfSpeech | \n",
" @lexfile | \n",
" Definition | \n",
" SynsetRelation | \n",
" Example | \n",
" ILIDefinition | \n",
" @dc:source | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" oewn-03159292-a | \n",
" i18097 | \n",
" oewn-avenged-a | \n",
" a | \n",
" adj.ppl | \n",
" for which vengeance has been taken | \n",
" [] | \n",
" an avenged injury | \n",
" None | \n",
" None | \n",
"
\n",
" \n",
" 1 | \n",
" oewn-03159419-a | \n",
" i18098 | \n",
" oewn-unavenged-a | \n",
" a | \n",
" adj.ppl | \n",
" for which vengeance has not been taken | \n",
" [] | \n",
" an unavenged murder | \n",
" None | \n",
" None | \n",
"
\n",
" \n",
" 2 | \n",
" oewn-03159554-a | \n",
" i18099 | \n",
" oewn-beaten-a | \n",
" a | \n",
" adj.ppl | \n",
" formed or made thin by hammering | \n",
" [] | \n",
" beaten gold | \n",
" None | \n",
" None | \n",
"
\n",
" \n",
" 3 | \n",
" oewn-03159654-a | \n",
" i18100 | \n",
" oewn-calibrated-a oewn-graduated-a | \n",
" a | \n",
" adj.ppl | \n",
" marked with or divided into degrees | \n",
" [] | \n",
" a calibrated thermometer | \n",
" None | \n",
" None | \n",
"
\n",
" \n",
" 4 | \n",
" oewn-03159804-a | \n",
" i18101 | \n",
" oewn-cantering-a | \n",
" a | \n",
" adj.ppl | \n",
" riding at a gait between a trot and a gallop | \n",
" [] | \n",
" the cantering soldiers | \n",
" None | \n",
" None | \n",
"
\n",
" \n",
"
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df"
}
},
"metadata": {},
"execution_count": 43
}
]
},
{
"cell_type": "markdown",
"source": [
"Getting the @members into a reasonable format is about to take a bunch of cells and most of my patience for the day."
],
"metadata": {
"id": "vJM-9DJE1Oaq"
}
},
{
"cell_type": "code",
"source": [
"df.shape"
],
"metadata": {
"id": "L9zrnh6Urqco",
"outputId": "1ace16a7-578a-4fa3-b474-5379d2a10248",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"(120135, 10)"
]
},
"metadata": {},
"execution_count": 44
}
]
},
{
"cell_type": "code",
"source": [
"df[['@members', '@partOfSpeech', '@lexfile']].head()"
],
"metadata": {
"id": "z8c4VmJ6lYwa",
"outputId": "c0446594-dfcb-4681-95c9-0028efa128a1",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 206
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" @members @partOfSpeech @lexfile\n",
"0 oewn-avenged-a a adj.ppl\n",
"1 oewn-unavenged-a a adj.ppl\n",
"2 oewn-beaten-a a adj.ppl\n",
"3 oewn-calibrated-a oewn-graduated-a a adj.ppl\n",
"4 oewn-cantering-a a adj.ppl"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" @members | \n",
" @partOfSpeech | \n",
" @lexfile | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" oewn-avenged-a | \n",
" a | \n",
" adj.ppl | \n",
"
\n",
" \n",
" 1 | \n",
" oewn-unavenged-a | \n",
" a | \n",
" adj.ppl | \n",
"
\n",
" \n",
" 2 | \n",
" oewn-beaten-a | \n",
" a | \n",
" adj.ppl | \n",
"
\n",
" \n",
" 3 | \n",
" oewn-calibrated-a oewn-graduated-a | \n",
" a | \n",
" adj.ppl | \n",
"
\n",
" \n",
" 4 | \n",
" oewn-cantering-a | \n",
" a | \n",
" adj.ppl | \n",
"
\n",
" \n",
"
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"summary": "{\n \"name\": \"df[['@members', '@partOfSpeech', '@lexfile']]\",\n \"rows\": 5,\n \"fields\": [\n {\n \"column\": \"@members\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"oewn-unavenged-a\",\n \"oewn-cantering-a\",\n \"oewn-beaten-a\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"@partOfSpeech\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 1,\n \"samples\": [\n \"a\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"@lexfile\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 1,\n \"samples\": [\n \"adj.ppl\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}"
}
},
"metadata": {},
"execution_count": 45
}
]
},
{
"cell_type": "code",
"source": [
"df = df[['@members', '@partOfSpeech', '@lexfile']]"
],
"metadata": {
"id": "bobEK-ZsllHr"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"pattern = r'^(\\w+-\\w+-\\w+ *)*$'\n",
"\n",
"matches_pattern = df['@members'].str.match(pattern)\n",
"\n",
"all_match_pattern = matches_pattern.all()\n",
"all_match_pattern"
],
"metadata": {
"id": "LqERc6pcFyao",
"outputId": "1fd3bad6-f639-49cf-ca84-a23003616511",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"False"
]
},
"metadata": {},
"execution_count": 47
}
]
},
{
"cell_type": "code",
"source": [
"members_not_matching_pattern = df[~matches_pattern]\n",
"members_not_matching_pattern"
],
"metadata": {
"id": "jLvqtPRvGeqN",
"outputId": "25982f3b-050e-427d-c953-e91fc7e1ed35",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 423
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" @members @partOfSpeech \\\n",
"13 oewn-hand-held-a oewn-handheld-a a \n",
"42 oewn-re-created-a a \n",
"49 oewn-spray-dried-a a \n",
"57 oewn-closed-captioned-a a \n",
"116 oewn-plane_figure-n oewn-two-dimensional_figure-n n \n",
"... ... ... \n",
"119954 oewn-real-time_processing-n oewn-real-time_ope... n \n",
"119976 oewn-reuptake-n oewn-re-uptake-n n \n",
"120005 oewn-slump-n oewn-slack-n oewn-drop-off-n oewn... n \n",
"120123 oewn-constant-volume_process-n oewn-isometric_... n \n",
"120127 oewn-anti-selection-n oewn-adverse_selection-n n \n",
"\n",
" @lexfile \n",
"13 adj.ppl \n",
"42 adj.ppl \n",
"49 adj.ppl \n",
"57 adj.ppl \n",
"116 noun.shape \n",
"... ... \n",
"119954 noun.process \n",
"119976 noun.process \n",
"120005 noun.process \n",
"120123 noun.process \n",
"120127 noun.process \n",
"\n",
"[6987 rows x 3 columns]"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" @members | \n",
" @partOfSpeech | \n",
" @lexfile | \n",
"
\n",
" \n",
" \n",
" \n",
" 13 | \n",
" oewn-hand-held-a oewn-handheld-a | \n",
" a | \n",
" adj.ppl | \n",
"
\n",
" \n",
" 42 | \n",
" oewn-re-created-a | \n",
" a | \n",
" adj.ppl | \n",
"
\n",
" \n",
" 49 | \n",
" oewn-spray-dried-a | \n",
" a | \n",
" adj.ppl | \n",
"
\n",
" \n",
" 57 | \n",
" oewn-closed-captioned-a | \n",
" a | \n",
" adj.ppl | \n",
"
\n",
" \n",
" 116 | \n",
" oewn-plane_figure-n oewn-two-dimensional_figure-n | \n",
" n | \n",
" noun.shape | \n",
"
\n",
" \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
"
\n",
" \n",
" 119954 | \n",
" oewn-real-time_processing-n oewn-real-time_ope... | \n",
" n | \n",
" noun.process | \n",
"
\n",
" \n",
" 119976 | \n",
" oewn-reuptake-n oewn-re-uptake-n | \n",
" n | \n",
" noun.process | \n",
"
\n",
" \n",
" 120005 | \n",
" oewn-slump-n oewn-slack-n oewn-drop-off-n oewn... | \n",
" n | \n",
" noun.process | \n",
"
\n",
" \n",
" 120123 | \n",
" oewn-constant-volume_process-n oewn-isometric_... | \n",
" n | \n",
" noun.process | \n",
"
\n",
" \n",
" 120127 | \n",
" oewn-anti-selection-n oewn-adverse_selection-n | \n",
" n | \n",
" noun.process | \n",
"
\n",
" \n",
"
\n",
"
6987 rows × 3 columns
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "members_not_matching_pattern",
"summary": "{\n \"name\": \"members_not_matching_pattern\",\n \"rows\": 6987,\n \"fields\": [\n {\n \"column\": \"@members\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 6837,\n \"samples\": [\n \"oewn-ichthyolatry-n oewn-fish-worship-n\",\n \"oewn-record-breaker-n oewn-record-holder-n\",\n \"oewn-green-white-a oewn-greenish-white-a\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"@partOfSpeech\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"n\",\n \"r\",\n \"s\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"@lexfile\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 43,\n \"samples\": [\n \"noun.possession\",\n \"noun.substance\",\n \"noun.location\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}"
}
},
"metadata": {},
"execution_count": 48
}
]
},
{
"cell_type": "code",
"source": [
"pattern = r'^(\\w+-[\\w_\\-.]+-\\w+ *)*$' # This took a couple of iterations not represented\n",
"\n",
"matches_pattern = df['@members'].str.match(pattern)\n",
"\n",
"all_match_pattern = matches_pattern.all()\n",
"all_match_pattern"
],
"metadata": {
"id": "m8Jo9CRfOANO",
"outputId": "ea4f54f7-1d25-4fc3-9ba8-70ae5000dae1",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"True"
]
},
"metadata": {},
"execution_count": 49
}
]
},
{
"cell_type": "code",
"source": [
"# I found this problem later on\n",
"# Plain substring test; str.match() would need '.*' padding since it anchors at the start\n",
"oddball = df['@members'].str.contains('Gravenhage', regex=False)\n",
"oddball_member = df[oddball]['@members']"
],
"metadata": {
"id": "jsFdSy6qmhmG"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"oddball_member.iloc[0]"
],
"metadata": {
"id": "AzVbuHXGnSkK",
"outputId": "ca09912d-6b62-405e-b4bb-54eb62d4ab70",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 35
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"'oewn-The_Hague-n oewn--ap-s_Gravenhage-n oewn-Den_Haag-n'"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
}
},
"metadata": {},
"execution_count": 51
}
]
},
{
"cell_type": "code",
"source": [
"df = df.assign(members=df['@members'].str.split()).explode('members')"
],
"metadata": {
"id": "J9uuPTGBqeVe"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"df.head()"
],
"metadata": {
"id": "rNbH6RL4rRPG",
"outputId": "b37b227f-bfe7-4420-af53-613d1ddc204e",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 206
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" @members @partOfSpeech @lexfile \\\n",
"0 oewn-avenged-a a adj.ppl \n",
"1 oewn-unavenged-a a adj.ppl \n",
"2 oewn-beaten-a a adj.ppl \n",
"3 oewn-calibrated-a oewn-graduated-a a adj.ppl \n",
"3 oewn-calibrated-a oewn-graduated-a a adj.ppl \n",
"\n",
" members \n",
"0 oewn-avenged-a \n",
"1 oewn-unavenged-a \n",
"2 oewn-beaten-a \n",
"3 oewn-calibrated-a \n",
"3 oewn-graduated-a "
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" @members | \n",
" @partOfSpeech | \n",
" @lexfile | \n",
" members | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" oewn-avenged-a | \n",
" a | \n",
" adj.ppl | \n",
" oewn-avenged-a | \n",
"
\n",
" \n",
" 1 | \n",
" oewn-unavenged-a | \n",
" a | \n",
" adj.ppl | \n",
" oewn-unavenged-a | \n",
"
\n",
" \n",
" 2 | \n",
" oewn-beaten-a | \n",
" a | \n",
" adj.ppl | \n",
" oewn-beaten-a | \n",
"
\n",
" \n",
" 3 | \n",
" oewn-calibrated-a oewn-graduated-a | \n",
" a | \n",
" adj.ppl | \n",
" oewn-calibrated-a | \n",
"
\n",
" \n",
" 3 | \n",
" oewn-calibrated-a oewn-graduated-a | \n",
" a | \n",
" adj.ppl | \n",
" oewn-graduated-a | \n",
"
\n",
" \n",
"
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df"
}
},
"metadata": {},
"execution_count": 53
}
]
},
{
"cell_type": "code",
"source": [
"df.shape"
],
"metadata": {
"id": "TTdfz_HkrT4r",
"outputId": "35d0ad6f-1b81-4eb6-988d-effd68e5e4b4",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"(212071, 4)"
]
},
"metadata": {},
"execution_count": 54
}
]
},
{
"cell_type": "code",
"source": [
"prefixes = df['members'].str.split('-', expand=True)[0]\n",
"prefix_freq = prefixes.value_counts().reset_index()\n",
"prefix_freq.columns = ['Prefix', 'Frequency']\n",
"\n",
"prefix_freq = prefix_freq.sort_values(by='Frequency', ascending=False)\n",
"\n",
"print(prefix_freq)"
],
"metadata": {
"id": "A6zydv3gq4IZ",
"outputId": "ea147541-f0c2-4dcc-f51c-1060507ab527",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
" Prefix Frequency\n",
"0 oewn 212071\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"to_remove = 'oewn-'\n",
"\n",
"# Slice the prefix off rather than using str.replace, which would delete\n",
"# every 'oewn-' occurrence in the string, not just the leading one.\n",
"df['members'] = df['members'].apply(lambda x: x[len(to_remove):] if x.startswith(to_remove) else x)"
],
"metadata": {
"id": "orcYSJC-rL_d"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"prefixes = df['members'].str.split('-', expand=True)[0]\n",
"prefix_freq = prefixes.value_counts().reset_index()\n",
"prefix_freq.columns = ['Prefix', 'Frequency']\n",
"\n",
"prefix_freq = prefix_freq.sort_values(by='Frequency', ascending=False)\n",
"\n",
"print(prefix_freq)"
],
"metadata": {
"id": "Zne276Jardwg",
"outputId": "b0d9aacb-2504-4e66-96cb-f8b8d4d72ea3",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
" Prefix Frequency\n",
"0 self 252\n",
"1 high 102\n",
"2 well 98\n",
"3 one 85\n",
"4 cut 79\n",
"... ... ...\n",
"66494 CIA 1\n",
"66493 National_Institute_of_Standards_and_Technology 1\n",
"66492 Counterterrorist_Center 1\n",
"66491 Nonproliferation_Center 1\n",
"145809 grammatical_cohesion 1\n",
"\n",
"[145810 rows x 2 columns]\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# Check for values starting with \"-\"\n",
"values_starting_with_dash = df[df['members'].str.startswith('-')]\n",
"\n",
"# Display the values starting with \"-\"\n",
"print(values_starting_with_dash)"
],
"metadata": {
"id": "sk2wdpTRsKhT",
"outputId": "e058e104-1b4d-463e-e046-8a550f8984c6",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
" @members @partOfSpeech \\\n",
"81633 oewn-The_Hague-n oewn--ap-s_Gravenhage-n oewn-... n \n",
"106115 oewn-between-r oewn--ap-tween-r r \n",
"107858 oewn-between_decks-r oewn--ap-tween_decks-r r \n",
"114349 oewn-hood-n oewn--ap-hood-n n \n",
"\n",
" @lexfile members \n",
"81633 noun.location -ap-s_Gravenhage-n \n",
"106115 adv.all -ap-tween-r \n",
"107858 adv.all -ap-tween_decks-r \n",
"114349 noun.group -ap-hood-n \n"
]
}
]
},
{
"cell_type": "code",
"source": [
"df.head()"
],
"metadata": {
"id": "I1ihnJ0RrmHy",
"outputId": "f3047e65-2191-4286-fd7d-1502aaab8842",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 206
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" @members @partOfSpeech @lexfile members\n",
"0 oewn-avenged-a a adj.ppl avenged-a\n",
"1 oewn-unavenged-a a adj.ppl unavenged-a\n",
"2 oewn-beaten-a a adj.ppl beaten-a\n",
"3 oewn-calibrated-a oewn-graduated-a a adj.ppl calibrated-a\n",
"3 oewn-calibrated-a oewn-graduated-a a adj.ppl graduated-a"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" @members | \n",
" @partOfSpeech | \n",
" @lexfile | \n",
" members | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" oewn-avenged-a | \n",
" a | \n",
" adj.ppl | \n",
" avenged-a | \n",
"
\n",
" \n",
" 1 | \n",
" oewn-unavenged-a | \n",
" a | \n",
" adj.ppl | \n",
" unavenged-a | \n",
"
\n",
" \n",
" 2 | \n",
" oewn-beaten-a | \n",
" a | \n",
" adj.ppl | \n",
" beaten-a | \n",
"
\n",
" \n",
" 3 | \n",
" oewn-calibrated-a oewn-graduated-a | \n",
" a | \n",
" adj.ppl | \n",
" calibrated-a | \n",
"
\n",
" \n",
" 3 | \n",
" oewn-calibrated-a oewn-graduated-a | \n",
" a | \n",
" adj.ppl | \n",
" graduated-a | \n",
"
\n",
" \n",
"
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df"
}
},
"metadata": {},
"execution_count": 59
}
]
},
{
"cell_type": "code",
"source": [
"# '-ap-' encodes an apostrophe, so restore it at the start of the word instead of\n",
"# silently dropping it — mid-word '-ap-' occurrences get converted to apostrophes,\n",
"# and leading ones (e.g. 's_Gravenhage, 'tween) should be treated the same way.\n",
"df['members'] = df['members'].apply(lambda x: \"'\" + x[4:] if x.startswith('-ap-') else x)"
],
"metadata": {
"id": "-6a_qlCUsvh4"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"df.drop(columns=['@members'], inplace=True)"
],
"metadata": {
"id": "SXHfgstpsyJi"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"suffixes = df['members'].str.split('-').str[-1]\n",
"\n",
"# Count frequencies of suffixes\n",
"suffix_freq = suffixes.value_counts().reset_index()\n",
"suffix_freq.columns = ['Suffix', 'Frequency']\n",
"\n",
"# Sort by frequency\n",
"suffix_freq = suffix_freq.sort_values(by='Frequency', ascending=False)\n",
"\n",
"# Display suffixes ordered by frequency\n",
"print(suffix_freq[:40])"
],
"metadata": {
"id": "m3qrn1yrtDHT",
"outputId": "c410f569-75dd-4de3-a906-cf5a775be230",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
" Suffix Frequency\n",
"0 n 151001\n",
"1 a 30150\n",
"2 v 25098\n",
"3 r 5595\n",
"4 1 146\n",
"5 2 69\n",
"6 s 12\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"filtered_df = df[df['members'].str.endswith(('1', '2', 's'))]\n",
"\n",
"print(filtered_df)"
],
"metadata": {
"id": "zZf0TzcLxKhe",
"outputId": "b3e6d6fe-6407-42d0-9b09-e4b4506cb646",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
" @partOfSpeech @lexfile members\n",
"286 n noun.shape lead-n-1\n",
"301 n noun.shape bow-n-1\n",
"325 n noun.shape tower-n-1\n",
"782 s adj.all panelled-s\n",
"2303 s adj.all centre-s\n",
"... ... ... ...\n",
"117472 v verb.body tear-v-2\n",
"117596 v verb.body recover-v-1\n",
"118299 v verb.communication bow-v-1\n",
"118397 v verb.communication bow-v-1\n",
"118473 v verb.communication whoop-v-1\n",
"\n",
"[227 rows x 3 columns]\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"df['members'] = df['members'].apply(lambda x: x.replace('-ap-', \"'\")) # They use this for apostrophe for some reason, probably because it was stored as yaml"
],
"metadata": {
"id": "LRCd0Zy_trIB"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Members end in '<word>-<pos>' with an optional sense number, e.g. 'avenged-a'\n",
"# or 'lead-n-1'. Strip at most one sense number and then at most one POS marker;\n",
"# the previous loop-until-no-suffix approach could eat into the word itself\n",
"# (e.g. 'M-1-n' would have collapsed to 'M' instead of 'M-1').\n",
"def remove_suffixes(member):\n",
"    if member.endswith(('-1', '-2')):\n",
"        member = member[:-2]  # drop the sense number\n",
"    if member.endswith(('-n', '-a', '-v', '-r', '-s')):\n",
"        member = member[:-2]  # drop the part-of-speech marker\n",
"    return member\n",
"\n",
"# Apply the function to each member in the DataFrame\n",
"df['members'] = df['members'].apply(remove_suffixes)\n",
"\n",
"# Display the updated DataFrame\n",
"df.head()"
],
"metadata": {
"id": "_dmFfqOwx3X4",
"outputId": "94b513b4-31e0-4cb8-8f87-97ca2e656a19",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 206
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" @partOfSpeech @lexfile members\n",
"0 a adj.ppl avenged\n",
"1 a adj.ppl unavenged\n",
"2 a adj.ppl beaten\n",
"3 a adj.ppl calibrated\n",
"3 a adj.ppl graduated"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" @partOfSpeech | \n",
" @lexfile | \n",
" members | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" a | \n",
" adj.ppl | \n",
" avenged | \n",
"
\n",
" \n",
" 1 | \n",
" a | \n",
" adj.ppl | \n",
" unavenged | \n",
"
\n",
" \n",
" 2 | \n",
" a | \n",
" adj.ppl | \n",
" beaten | \n",
"
\n",
" \n",
" 3 | \n",
" a | \n",
" adj.ppl | \n",
" calibrated | \n",
"
\n",
" \n",
" 3 | \n",
" a | \n",
" adj.ppl | \n",
" graduated | \n",
"
\n",
" \n",
"
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df"
}
},
"metadata": {},
"execution_count": 65
}
]
},
{
"cell_type": "code",
"source": [
"# Underscores encode spaces in multi-word lemmas; swap them back.\n",
"df['members'] = df['members'].apply(lambda x: x.replace('_', ' '))"
],
"metadata": {
"id": "1XbVExgWwaVo"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"df.head()"
],
"metadata": {
"id": "e4u-FcGRwkbM",
"outputId": "f842eccc-d7c3-4479-e7ed-12ddfe0d6afb",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 206
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" @partOfSpeech @lexfile members\n",
"0 a adj.ppl avenged\n",
"1 a adj.ppl unavenged\n",
"2 a adj.ppl beaten\n",
"3 a adj.ppl calibrated\n",
"3 a adj.ppl graduated"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" @partOfSpeech | \n",
" @lexfile | \n",
" members | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" a | \n",
" adj.ppl | \n",
" avenged | \n",
"
\n",
" \n",
" 1 | \n",
" a | \n",
" adj.ppl | \n",
" unavenged | \n",
"
\n",
" \n",
" 2 | \n",
" a | \n",
" adj.ppl | \n",
" beaten | \n",
"
\n",
" \n",
" 3 | \n",
" a | \n",
" adj.ppl | \n",
" calibrated | \n",
"
\n",
" \n",
" 3 | \n",
" a | \n",
" adj.ppl | \n",
" graduated | \n",
"
\n",
" \n",
"
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df"
}
},
"metadata": {},
"execution_count": 67
}
]
},
{
"cell_type": "code",
"source": [
"pd.get_dummies(df['@partOfSpeech'])"
],
"metadata": {
"id": "3VmUnWJel5CK",
"outputId": "ee86f84b-2af6-4be2-e251-1d46fa792139",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 423
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" a n r s v\n",
"0 1 0 0 0 0\n",
"1 1 0 0 0 0\n",
"2 1 0 0 0 0\n",
"3 1 0 0 0 0\n",
"3 1 0 0 0 0\n",
"... .. .. .. .. ..\n",
"120130 0 1 0 0 0\n",
"120131 0 1 0 0 0\n",
"120132 0 1 0 0 0\n",
"120133 0 1 0 0 0\n",
"120134 0 1 0 0 0\n",
"\n",
"[212071 rows x 5 columns]"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" a | \n",
" n | \n",
" r | \n",
" s | \n",
" v | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 1 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 2 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 3 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 3 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
"
\n",
" \n",
" 120130 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120131 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120132 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120133 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120134 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
"
\n",
"
212071 rows × 5 columns
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe"
}
},
"metadata": {},
"execution_count": 68
}
]
},
{
"cell_type": "code",
"source": [
"pd.get_dummies(df['@lexfile'])"
],
"metadata": {
"id": "-ypW9xpkmC1W",
"outputId": "f09691b9-0d4c-4785-cdc5-30b76c5b803e",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 443
}
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" adj.all adj.pert adj.ppl adv.all noun.Tops noun.act noun.animal \\\n",
"0 0 0 1 0 0 0 0 \n",
"1 0 0 1 0 0 0 0 \n",
"2 0 0 1 0 0 0 0 \n",
"3 0 0 1 0 0 0 0 \n",
"3 0 0 1 0 0 0 0 \n",
"... ... ... ... ... ... ... ... \n",
"120130 0 0 0 0 0 0 0 \n",
"120131 0 0 0 0 0 0 0 \n",
"120132 0 0 0 0 0 0 0 \n",
"120133 0 0 0 0 0 0 0 \n",
"120134 0 0 0 0 0 0 0 \n",
"\n",
" noun.artifact noun.attribute noun.body ... verb.consumption \\\n",
"0 0 0 0 ... 0 \n",
"1 0 0 0 ... 0 \n",
"2 0 0 0 ... 0 \n",
"3 0 0 0 ... 0 \n",
"3 0 0 0 ... 0 \n",
"... ... ... ... ... ... \n",
"120130 0 0 0 ... 0 \n",
"120131 0 0 0 ... 0 \n",
"120132 0 0 0 ... 0 \n",
"120133 0 0 0 ... 0 \n",
"120134 0 0 0 ... 0 \n",
"\n",
" verb.contact verb.creation verb.emotion verb.motion \\\n",
"0 0 0 0 0 \n",
"1 0 0 0 0 \n",
"2 0 0 0 0 \n",
"3 0 0 0 0 \n",
"3 0 0 0 0 \n",
"... ... ... ... ... \n",
"120130 0 0 0 0 \n",
"120131 0 0 0 0 \n",
"120132 0 0 0 0 \n",
"120133 0 0 0 0 \n",
"120134 0 0 0 0 \n",
"\n",
" verb.perception verb.possession verb.social verb.stative \\\n",
"0 0 0 0 0 \n",
"1 0 0 0 0 \n",
"2 0 0 0 0 \n",
"3 0 0 0 0 \n",
"3 0 0 0 0 \n",
"... ... ... ... ... \n",
"120130 0 0 0 0 \n",
"120131 0 0 0 0 \n",
"120132 0 0 0 0 \n",
"120133 0 0 0 0 \n",
"120134 0 0 0 0 \n",
"\n",
" verb.weather \n",
"0 0 \n",
"1 0 \n",
"2 0 \n",
"3 0 \n",
"3 0 \n",
"... ... \n",
"120130 0 \n",
"120131 0 \n",
"120132 0 \n",
"120133 0 \n",
"120134 0 \n",
"\n",
"[212071 rows x 45 columns]"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" adj.all | \n",
" adj.pert | \n",
" adj.ppl | \n",
" adv.all | \n",
" noun.Tops | \n",
" noun.act | \n",
" noun.animal | \n",
" noun.artifact | \n",
" noun.attribute | \n",
" noun.body | \n",
" ... | \n",
" verb.consumption | \n",
" verb.contact | \n",
" verb.creation | \n",
" verb.emotion | \n",
" verb.motion | \n",
" verb.perception | \n",
" verb.possession | \n",
" verb.social | \n",
" verb.stative | \n",
" verb.weather | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 2 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 3 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 3 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
"
\n",
" \n",
" 120130 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120131 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120132 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120133 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120134 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
"
\n",
"
212071 rows × 45 columns
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe"
}
},
"metadata": {},
"execution_count": 69
}
]
},
{
"cell_type": "code",
"source": [
"# Assemble the upload frame: lemma text plus one-hot POS and one-hot lexfile,\n",
"# concatenated column-wise in a single pass (same column order as chaining two concats).\n",
"df_to_upload = pd.concat(\n",
"    [df['members'], pd.get_dummies(df['@partOfSpeech']), pd.get_dummies(df['@lexfile'])],\n",
"    axis=1,\n",
")"
],
"metadata": {
"id": "OC2_nyEpE-DS"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "tC1ZbcL9RkV6",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 496
},
"outputId": "f6ca30d8-2bca-447f-c60f-6126381d6e74"
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" members a n r s v adj.all adj.pert adj.ppl \\\n",
"0 avenged 1 0 0 0 0 0 0 1 \n",
"1 unavenged 1 0 0 0 0 0 0 1 \n",
"2 beaten 1 0 0 0 0 0 0 1 \n",
"3 calibrated 1 0 0 0 0 0 0 1 \n",
"3 graduated 1 0 0 0 0 0 0 1 \n",
"... ... .. .. .. .. .. ... ... ... \n",
"120130 bromoil process 0 1 0 0 0 0 0 0 \n",
"120131 interfixation 0 1 0 0 0 0 0 0 \n",
"120132 consonant mutation 0 1 0 0 0 0 0 0 \n",
"120133 cohesion 0 1 0 0 0 0 0 0 \n",
"120134 grammatical cohesion 0 1 0 0 0 0 0 0 \n",
"\n",
" adv.all ... verb.consumption verb.contact verb.creation \\\n",
"0 0 ... 0 0 0 \n",
"1 0 ... 0 0 0 \n",
"2 0 ... 0 0 0 \n",
"3 0 ... 0 0 0 \n",
"3 0 ... 0 0 0 \n",
"... ... ... ... ... ... \n",
"120130 0 ... 0 0 0 \n",
"120131 0 ... 0 0 0 \n",
"120132 0 ... 0 0 0 \n",
"120133 0 ... 0 0 0 \n",
"120134 0 ... 0 0 0 \n",
"\n",
" verb.emotion verb.motion verb.perception verb.possession \\\n",
"0 0 0 0 0 \n",
"1 0 0 0 0 \n",
"2 0 0 0 0 \n",
"3 0 0 0 0 \n",
"3 0 0 0 0 \n",
"... ... ... ... ... \n",
"120130 0 0 0 0 \n",
"120131 0 0 0 0 \n",
"120132 0 0 0 0 \n",
"120133 0 0 0 0 \n",
"120134 0 0 0 0 \n",
"\n",
" verb.social verb.stative verb.weather \n",
"0 0 0 0 \n",
"1 0 0 0 \n",
"2 0 0 0 \n",
"3 0 0 0 \n",
"3 0 0 0 \n",
"... ... ... ... \n",
"120130 0 0 0 \n",
"120131 0 0 0 \n",
"120132 0 0 0 \n",
"120133 0 0 0 \n",
"120134 0 0 0 \n",
"\n",
"[212071 rows x 51 columns]"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" members | \n",
" a | \n",
" n | \n",
" r | \n",
" s | \n",
" v | \n",
" adj.all | \n",
" adj.pert | \n",
" adj.ppl | \n",
" adv.all | \n",
" ... | \n",
" verb.consumption | \n",
" verb.contact | \n",
" verb.creation | \n",
" verb.emotion | \n",
" verb.motion | \n",
" verb.perception | \n",
" verb.possession | \n",
" verb.social | \n",
" verb.stative | \n",
" verb.weather | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" avenged | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 1 | \n",
" unavenged | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 2 | \n",
" beaten | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 3 | \n",
" calibrated | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 3 | \n",
" graduated | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
"
\n",
" \n",
" 120130 | \n",
" bromoil process | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120131 | \n",
" interfixation | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120132 | \n",
" consonant mutation | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120133 | \n",
" cohesion | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 120134 | \n",
" grammatical cohesion | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
"
\n",
"
212071 rows × 51 columns
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df_to_upload"
}
},
"metadata": {},
"execution_count": 82
}
],
"source": [
"# Display the combined frame (212071 rows x 51 columns per the captured output).\n",
"df_to_upload"
]
},
{
"cell_type": "code",
"source": [
"# Preview the first rows; note index 3 appears twice ('calibrated'/'graduated'),\n",
"# i.e. one synset row expanded into multiple members — deduplicated in a later cell.\n",
"df_to_upload.head()"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 235
},
"id": "EzZKf74kE2cL",
"outputId": "7cb82111-04cd-415f-a5d6-3b97c10ac42f"
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" members a n r s v adj.all adj.pert adj.ppl adv.all ... \\\n",
"0 avenged 1 0 0 0 0 0 0 1 0 ... \n",
"1 unavenged 1 0 0 0 0 0 0 1 0 ... \n",
"2 beaten 1 0 0 0 0 0 0 1 0 ... \n",
"3 calibrated 1 0 0 0 0 0 0 1 0 ... \n",
"3 graduated 1 0 0 0 0 0 0 1 0 ... \n",
"\n",
" verb.consumption verb.contact verb.creation verb.emotion verb.motion \\\n",
"0 0 0 0 0 0 \n",
"1 0 0 0 0 0 \n",
"2 0 0 0 0 0 \n",
"3 0 0 0 0 0 \n",
"3 0 0 0 0 0 \n",
"\n",
" verb.perception verb.possession verb.social verb.stative verb.weather \n",
"0 0 0 0 0 0 \n",
"1 0 0 0 0 0 \n",
"2 0 0 0 0 0 \n",
"3 0 0 0 0 0 \n",
"3 0 0 0 0 0 \n",
"\n",
"[5 rows x 51 columns]"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" members | \n",
" a | \n",
" n | \n",
" r | \n",
" s | \n",
" v | \n",
" adj.all | \n",
" adj.pert | \n",
" adj.ppl | \n",
" adv.all | \n",
" ... | \n",
" verb.consumption | \n",
" verb.contact | \n",
" verb.creation | \n",
" verb.emotion | \n",
" verb.motion | \n",
" verb.perception | \n",
" verb.possession | \n",
" verb.social | \n",
" verb.stative | \n",
" verb.weather | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" avenged | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 1 | \n",
" unavenged | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 2 | \n",
" beaten | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 3 | \n",
" calibrated | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 3 | \n",
" graduated | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
"
\n",
"
5 rows × 51 columns
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df_to_upload"
}
},
"metadata": {},
"execution_count": 83
}
]
},
{
"cell_type": "code",
"source": [
"# Collapse duplicate lemmas to one row per member; max() ORs the 0/1 indicator\n",
"# columns across a member's synsets. as_index=False keeps 'members' as a column\n",
"# (equivalent to .max().reset_index()).\n",
"df_to_upload = df_to_upload.groupby('members', as_index=False).max()\n"
],
"metadata": {
"id": "WLXGuobjAIQZ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"df_to_upload"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 513
},
"id": "QboSaTYxEkgV",
"outputId": "df4fc32e-9fce-459c-b6ed-d632a28322bb"
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" members a n r s v adj.all adj.pert adj.ppl adv.all ... \\\n",
"0 .22 0 1 0 0 0 0 0 0 0 ... \n",
"1 .22 caliber 1 0 0 0 0 0 1 0 0 ... \n",
"2 .22 calibre 1 0 0 0 0 0 1 0 0 ... \n",
"3 .22-caliber 1 0 0 0 0 0 1 0 0 ... \n",
"4 .22-calibre 1 0 0 0 0 0 1 0 0 ... \n",
"... ... .. .. .. .. .. ... ... ... ... ... \n",
"153356 zymolysis 0 1 0 0 0 0 0 0 0 ... \n",
"153357 zymolytic 1 0 0 0 0 0 1 0 0 ... \n",
"153358 zymosis 0 1 0 0 0 0 0 0 0 ... \n",
"153359 zymotic 1 0 0 0 0 0 1 0 0 ... \n",
"153360 zymurgy 0 1 0 0 0 0 0 0 0 ... \n",
"\n",
" verb.consumption verb.contact verb.creation verb.emotion \\\n",
"0 0 0 0 0 \n",
"1 0 0 0 0 \n",
"2 0 0 0 0 \n",
"3 0 0 0 0 \n",
"4 0 0 0 0 \n",
"... ... ... ... ... \n",
"153356 0 0 0 0 \n",
"153357 0 0 0 0 \n",
"153358 0 0 0 0 \n",
"153359 0 0 0 0 \n",
"153360 0 0 0 0 \n",
"\n",
" verb.motion verb.perception verb.possession verb.social \\\n",
"0 0 0 0 0 \n",
"1 0 0 0 0 \n",
"2 0 0 0 0 \n",
"3 0 0 0 0 \n",
"4 0 0 0 0 \n",
"... ... ... ... ... \n",
"153356 0 0 0 0 \n",
"153357 0 0 0 0 \n",
"153358 0 0 0 0 \n",
"153359 0 0 0 0 \n",
"153360 0 0 0 0 \n",
"\n",
" verb.stative verb.weather \n",
"0 0 0 \n",
"1 0 0 \n",
"2 0 0 \n",
"3 0 0 \n",
"4 0 0 \n",
"... ... ... \n",
"153356 0 0 \n",
"153357 0 0 \n",
"153358 0 0 \n",
"153359 0 0 \n",
"153360 0 0 \n",
"\n",
"[153361 rows x 51 columns]"
],
"text/html": [
"\n",
" \n",
"
\n",
"\n",
"
\n",
" \n",
" \n",
" | \n",
" members | \n",
" a | \n",
" n | \n",
" r | \n",
" s | \n",
" v | \n",
" adj.all | \n",
" adj.pert | \n",
" adj.ppl | \n",
" adv.all | \n",
" ... | \n",
" verb.consumption | \n",
" verb.contact | \n",
" verb.creation | \n",
" verb.emotion | \n",
" verb.motion | \n",
" verb.perception | \n",
" verb.possession | \n",
" verb.social | \n",
" verb.stative | \n",
" verb.weather | \n",
"
\n",
" \n",
" \n",
" \n",
" 0 | \n",
" .22 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 1 | \n",
" .22 caliber | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 2 | \n",
" .22 calibre | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 3 | \n",
" .22-caliber | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 4 | \n",
" .22-calibre | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
" ... | \n",
"
\n",
" \n",
" 153356 | \n",
" zymolysis | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 153357 | \n",
" zymolytic | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 153358 | \n",
" zymosis | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 153359 | \n",
" zymotic | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
" 153360 | \n",
" zymurgy | \n",
" 0 | \n",
" 1 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" ... | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
" 0 | \n",
"
\n",
" \n",
"
\n",
"
153361 rows × 51 columns
\n",
"
\n",
"
\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "df_to_upload"
}
},
"metadata": {},
"execution_count": 86
}
]
},
{
"cell_type": "code",
"source": [
"# Persist the per-member categorical table; index dropped since 'members' is a column.\n",
"output_filename = \"openwordnet-categoricals.csv\"\n",
"df_to_upload.to_csv(output_filename, index=False)"
],
"metadata": {
"id": "dDMrTmRQFovn"
},
"execution_count": null,
"outputs": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
},
"colab": {
"provenance": [],
"gpuType": "T4",
"name": "openwordnet-to-categoricals.ipynb"
},
"accelerator": "GPU"
},
"nbformat": 4,
"nbformat_minor": 0
}