{ "cells": [ { "cell_type": "code", "execution_count": 2, "id": "809c06bc", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "import spacy" ] }, { "cell_type": "code", "execution_count": 3, "id": "e8b0dce4", "metadata": {}, "outputs": [], "source": [ "import en_ner_bc5cdr_md\n", "import en_core_med7_lg" ] }, { "cell_type": "code", "execution_count": 4, "id": "84d4467f", "metadata": {}, "outputs": [], "source": [ "# To install spaCy's pre-trained model en_ner_bc5cdr_md, en_core_med7_lg use the link below. You can also install the transformer\n", "\n", "# !pip install https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.5.1/en_ner_bc5cdr_md-0.5.1.tar.gz\n", "# !pip install https://huggingface.co/kormilitzin/en_core_med7_trf/resolve/main/en_core_med7_trf-any-py3-none-any.whl --user\n", "# !pip install https://huggingface.co/kormilitzin/en_core_med7_lg/resolve/main/en_core_med7_lg-any-py3-none-any.whl --user" ] }, { "cell_type": "code", "execution_count": 5, "id": "1ee710ca", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
descriptionmedical_specialtysample_nametranscriptionkeywords
0A 23-year-old white female presents with comp...Allergy / ImmunologyAllergic RhinitisSUBJECTIVE:, This 23-year-old white female pr...allergy / immunology, allergic rhinitis, aller...
1Consult for laparoscopic gastric bypass.BariatricsLaparoscopic Gastric Bypass Consult - 2PAST MEDICAL HISTORY:, He has difficulty climb...bariatrics, laparoscopic gastric bypass, weigh...
2Consult for laparoscopic gastric bypass.BariatricsLaparoscopic Gastric Bypass Consult - 1HISTORY OF PRESENT ILLNESS: , I have seen ABC ...bariatrics, laparoscopic gastric bypass, heart...
32-D M-Mode. Doppler.Cardiovascular / Pulmonary2-D Echocardiogram - 12-D M-MODE: , ,1. Left atrial enlargement wit...cardiovascular / pulmonary, 2-d m-mode, dopple...
42-D EchocardiogramCardiovascular / Pulmonary2-D Echocardiogram - 21. The left ventricular cavity size and wall ...cardiovascular / pulmonary, 2-d, doppler, echo...
\n", "
" ], "text/plain": [ " description \\\n", "0 A 23-year-old white female presents with comp... \n", "1 Consult for laparoscopic gastric bypass. \n", "2 Consult for laparoscopic gastric bypass. \n", "3 2-D M-Mode. Doppler. \n", "4 2-D Echocardiogram \n", "\n", " medical_specialty sample_name \\\n", "0 Allergy / Immunology Allergic Rhinitis \n", "1 Bariatrics Laparoscopic Gastric Bypass Consult - 2 \n", "2 Bariatrics Laparoscopic Gastric Bypass Consult - 1 \n", "3 Cardiovascular / Pulmonary 2-D Echocardiogram - 1 \n", "4 Cardiovascular / Pulmonary 2-D Echocardiogram - 2 \n", "\n", " transcription \\\n", "0 SUBJECTIVE:, This 23-year-old white female pr... \n", "1 PAST MEDICAL HISTORY:, He has difficulty climb... \n", "2 HISTORY OF PRESENT ILLNESS: , I have seen ABC ... \n", "3 2-D M-MODE: , ,1. Left atrial enlargement wit... \n", "4 1. The left ventricular cavity size and wall ... \n", "\n", " keywords \n", "0 allergy / immunology, allergic rhinitis, aller... \n", "1 bariatrics, laparoscopic gastric bypass, weigh... \n", "2 bariatrics, laparoscopic gastric bypass, heart... \n", "3 cardiovascular / pulmonary, 2-d m-mode, dopple... \n", "4 cardiovascular / pulmonary, 2-d, doppler, echo... 
" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "med = pd.read_csv('mtsamples.csv', index_col=0)\n", "med.head()" ] }, { "cell_type": "code", "execution_count": 6, "id": "453c00da", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "description 0\n", "medical_specialty 0\n", "sample_name 0\n", "transcription 33\n", "keywords 1068\n", "dtype: int64" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "med.isnull().sum()" ] }, { "cell_type": "code", "execution_count": 7, "id": "ac84c247", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(4999, 5)" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "med.shape" ] }, { "cell_type": "code", "execution_count": 8, "id": "bc3d4bd7", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "Int64Index: 4999 entries, 0 to 4998\n", "Data columns (total 5 columns):\n", " # Column Non-Null Count Dtype \n", "--- ------ -------------- ----- \n", " 0 description 4999 non-null object\n", " 1 medical_specialty 4999 non-null object\n", " 2 sample_name 4999 non-null object\n", " 3 transcription 4966 non-null object\n", " 4 keywords 3931 non-null object\n", "dtypes: object(5)\n", "memory usage: 234.3+ KB\n" ] } ], "source": [ "med.info()" ] }, { "cell_type": "code", "execution_count": 9, "id": "79682feb", "metadata": {}, "outputs": [], "source": [ "import re\n", "\n", "med['transcription'] = med['transcription'].astype('str')\n", "med['transcription'] = med['transcription'].apply(lambda x: re.sub('(\\.,)', \". \", x))" ] }, { "cell_type": "code", "execution_count": 10, "id": "6121d1e4", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Document:\n", "SUBJECTIVE:, This 23-year-old white female presents with complaint of allergies. She used to have allergies when she lived in Seattle but she thinks they are worse here. 
In the past, she has tried Claritin, and Zyrtec. Both worked for short time but then seemed to lose effectiveness. She has used Allegra also. She used that last summer and she began using it again two weeks ago. It does not appear to be working very well. She has used over-the-counter sprays but no prescription nasal sprays. She does have asthma but doest not require daily medication for this and does not think it is flaring up. MEDICATIONS: , Her only medication currently is Ortho Tri-Cyclen and the Allegra. ALLERGIES: , She has no known medicine allergies. OBJECTIVE:,Vitals: Weight was 130 pounds and blood pressure 124/78. HEENT: Her throat was mildly erythematous without exudate. Nasal mucosa was erythematous and swollen. Only clear drainage was seen. TMs were clear. Neck: Supple without adenopathy. Lungs: Clear. ASSESSMENT:, Allergic rhinitis. PLAN:,1. She will try Zyrtec instead of Allegra again. Another option will be to use loratadine. She does not think she has prescription coverage so that might be cheaper. 2. Samples of Nasonex two sprays in each nostril given for three weeks. 
nlp = spacy.load("en_core_med7_lg")

def generate_annotation(texts):
    """Build spaCy-style training annotations for a sequence of texts.

    Parameters
    ----------
    texts : iterable of str
        Raw documents to run through the loaded NER pipeline.

    Returns
    -------
    list of (str, dict)
        One tuple per input text: the text itself and
        {'entities': [(start_char, end_char, label), ...]}.
    """
    annotations = []
    # nlp.pipe batches documents through the pipeline, which is substantially
    # faster than calling nlp(text) once per document for ~5k transcriptions.
    for text, doc in zip(texts, nlp.pipe(texts)):
        entities = [(ent.start_char, ent.end_char, ent.label_) for ent in doc.ents]
        annotations.append((text, {'entities': entities}))
    return annotations

# Extract text entities and labels from the dataset (transcription)
medical_doc = med['transcription'].tolist()

# Generate annotations for every transcription
annotations = generate_annotation(medical_doc)

# Print the first document and its annotations as a sanity check
print("Document:")
print(annotations[0][0])  # first document text
print("Annotations:")
print(annotations[0][1])  # annotation for the first document
from spacy import displacy
nlp = spacy.load("en_core_med7_lg")

# Give each NER label its own colour so entity types are easy to tell apart
# in the rendered markup.
s_colours = ['#e6194B', '#3cb44b', '#ffe119', '#ffd8b1', '#f58231', '#f032e6', '#42d4f4']
ner_labels = nlp.pipe_labels['ner']
col_dict = dict(zip(ner_labels, s_colours))

options = {'ents': ner_labels, 'colors': col_dict}

transcription = med['transcription'][0]
doc = nlp(transcription)

# Render inline in the notebook via the displacy name imported above.
displacy.render(doc, style='ent', jupyter=True, options=options)

[(ent.text, ent.label_) for ent in doc.ents]
Type: CHEMICAL\n", "Text: chest pain, Entity Type: DISEASE\n", "Text: coronary artery disease, Entity Type: DISEASE\n", "Text: congestive heart failure, Entity Type: DISEASE\n", "Text: arrhythmia, Entity Type: DISEASE\n", "Text: atrial fibrillation, Entity Type: DISEASE\n", "Text: cholesterol, Entity Type: CHEMICAL\n", "Text: pulmonary embolism, Entity Type: DISEASE\n", "Text: CVA, Entity Type: CHEMICAL\n", "Text: venous insufficiency, Entity Type: DISEASE\n", "Text: thrombophlebitis, Entity Type: DISEASE\n", "Text: asthma, Entity Type: DISEASE\n", "Text: shortness of breath, Entity Type: DISEASE\n", "Text: COPD, Entity Type: DISEASE\n", "Text: emphysema, Entity Type: DISEASE\n", "Text: sleep apnea, Entity Type: DISEASE\n", "Text: diabetes, Entity Type: DISEASE\n", "Text: osteoarthritis, Entity Type: DISEASE\n", "Text: rheumatoid arthritis, Entity Type: DISEASE\n", "Text: hiatal hernia, Entity Type: DISEASE\n", "Text: peptic ulcer disease, Entity Type: DISEASE\n", "Text: gallstones, Entity Type: DISEASE\n", "Text: pancreatitis, Entity Type: DISEASE\n", "Text: fatty liver, Entity Type: DISEASE\n", "Text: hepatitis, Entity Type: DISEASE\n", "Text: hemorrhoids, Entity Type: DISEASE\n", "Text: bleeding, Entity Type: DISEASE\n", "Text: polyps, Entity Type: DISEASE\n", "Text: incontinence, Entity Type: DISEASE\n", "Text: urinary stress incontinence, Entity Type: DISEASE\n", "Text: cancer, Entity Type: DISEASE\n", "Text: cellulitis, Entity Type: DISEASE\n", "Text: meningitis, Entity Type: DISEASE\n", "Text: encephalitis, Entity Type: DISEASE\n" ] } ], "source": [ "nlp = spacy.load(\"en_ner_bc5cdr_md\")\n", "\n", "transcription = med['transcription'].iloc[1]\n", "doc= nlp(transcription)\n", "\n", "# Let's extract and print all the entity\n", "for ent in doc.ents:\n", " print(f\"Text: {ent.text}, Entity Type: {ent.label_}\")\n", " #print(f\"Text: {ent.text}, Start: {ent.start_char}, End: {ent.end_char}, Entity Type: {ent.label_}\") you can also use this" ] }, { "cell_type": 
"code", "execution_count": 26, "id": "aad31e23", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "DRUG_DOSE 514 517 Diastat 20 mg\n", "DRUG_DOSE 518 521 Topamax 25 mg\n", "DRUG_DOSE 532 535 Tranxene 15 mg\n", "DRUG_DOSE 538 541 Depakote 125 mg\n", "DRUG_DOSE 729 732 Depacon 250 mg\n", "DRUG_DOSE 266 269 Pepcid 40 mg\n", "DRUG_DOSE 109 112 furosemide 40 mg\n", "DRUG_DOSE 897 900 diltiazem 120 mg\n", "DRUG_DOSE 433 436 Aspirin 325 mg\n", "DRUG_DOSE 443 446 Lisinopril 40 mg\n", "DRUG_DOSE 453 456 Felodipine 10 mg\n", "DRUG_DOSE 465 468 Con 20 mEq\n", "DRUG_DOSE 475 478 Omeprazole 20 mg\n", "DRUG_DOSE 488 491 MiraLax 17 g\n", "DRUG_DOSE 498 501 Lasix 20 mg\n", "DRUG_DOSE 282 285 Omeprazole 40 mg\n", "DRUG_DOSE 25 28 Prozac 20 mg\n", "DRUG_DOSE 274 277 Rocephin 250 mg\n", "DRUG_DOSE 278 281 azithromycin 1000 mg\n", "DRUG_DOSE 504 507 Coumadin 5 mg\n", "DRUG_DOSE 524 527 Aspirin 81 mg\n", "DRUG_DOSE 533 536 Hydrochlorothiazide 25 mg\n", "DRUG_DOSE 542 545 Plendil 10 mg\n", "DRUG_DOSE 550 553 Lipitor 40 mg\n", "DRUG_DOSE 955 958 dexamethasone 4 mg\n", "DRUG_DOSE 286 289 Plavix 75 mg\n", "DRUG_DOSE 294 297 metoprolol 25 mg\n", "DRUG_DOSE 302 305 Flomax 0.4 mg\n", "DRUG_DOSE 310 313 Zocor 20 mg\n", "DRUG_DOSE 327 330 lisinopril 10 mg\n", "DRUG_DOSE 78 81 iCAD Second Look\n", "DRUG_DOSE 334 337 iCAD Second Look\n", "DRUG_DOSE 27 30 fentanyl 25 mcg\n", "DRUG_DOSE 100 103 Xylocaine 1%\n", "DRUG_DOSE 66 69 Lexiscan 0.4 mg\n", "DRUG_DOSE 187 190 lidocaine 2%\n", "DRUG_DOSE 194 197 Marcaine 1.7 mL\n", "DRUG_DOSE 258 261 Plaquenil 200 mg\n", "DRUG_DOSE 268 271 Fosamax 170 mg\n", "DRUG_DOSE 290 293 acid 1 mg\n", "DRUG_DOSE 299 302 Trilisate 1000 mg\n", "DRUG_DOSE 320 323 Hydrochlorothiazide 15 mg\n", "DRUG_DOSE 330 333 Lopressor 50 mg\n", "DRUG_DOSE 344 347 Trazodone 100 mg\n", "DRUG_DOSE 353 356 Prempro 0.625 mg\n", "DRUG_DOSE 362 365 Aspirin 325 mg\n", "DRUG_DOSE 372 375 Lipitor 10 mg\n", "DRUG_DOSE 381 384 Pepcid 20 mg\n", "DRUG_DOSE 391 394 
Reglan 10 mg\n", "DRUG_DOSE 403 406 Celexa 20 mg\n", "DRUG_DOSE 721 724 Azithromycin 5-\n", "DRUG_DOSE 746 749 Atarax 25 mg\n", "DRUG_DOSE 390 393 sotalol 80 mg\n", "DRUG_DOSE 398 401 metoprolol 50 mg\n", "DRUG_DOSE 414 417 digoxin 0.125 mg\n", "DRUG_DOSE 504 507 cephalexin 500 mg\n", "DRUG_DOSE 56 59 Versed 3,\n", "DRUG_DOSE 59 62 Demerol 25 and\n", "DRUG_DOSE 230 233 aspirin one tablet\n", "DRUG_DOSE 249 252 Warfarin 2.5 mg\n", "DRUG_DOSE 498 501 Synthroid 0.5 mg\n", "DRUG_DOSE 507 510 Plavix 75 mg\n", "DRUG_DOSE 515 518 acid 1 mg\n", "DRUG_DOSE 522 525 Diovan 80 mg\n", "DRUG_DOSE 529 532 Renagel 2 tablets\n", "DRUG_DOSE 540 543 Lasix 40 mg\n", "DRUG_DOSE 552 555 lovastatin 20 mg\n", "DRUG_DOSE 559 562 Coreg 3.125 mg\n", "DRUG_DOSE 577 580 Phenergan 25 mg\n", "DRUG_DOSE 589 592 Pepcid 20 mg\n", "DRUG_DOSE 596 599 Vicodin 1 tablet\n", "DRUG_DOSE 609 612 Levaquin 250 mg\n", "DRUG_DOSE 89 92 Coreg 6.25 mg\n", "DRUG_DOSE 132 135 Coreg 6.25 mg\n", "DRUG_DOSE 140 143 Simvastatin 40 mg\n", "DRUG_DOSE 148 151 Lisinopril 5 mg\n", "DRUG_DOSE 156 159 Protonix 40 mg\n", "DRUG_DOSE 163 166 Aspirin 160 mg\n", "DRUG_DOSE 172 175 Lasix 20 mg\n", "DRUG_DOSE 195 198 Advair 500/50 puff\n" ] } ], "source": [ "from spacy.matcher import Matcher\n", "\n", "# Let's load the model\n", "nlp = spacy.load(\"en_core_med7_lg\")\n", "\n", "patterns = [\n", " [{\"ENT_TYPE\": \"DRUG\"}, {\"LIKE_NUM\": True}, {\"IS_ASCII\": True}],\n", " [{\"LOWER\": {\"IN\": [\"mg\", \"g\", \"ml\"]}}, {\"ENT_TYPE\": \"DRUG\"}],\n", " [{\"ENT_TYPE\": \"DRUG\"}, {\"IS_DIGIT\": True, \"OP\": \"?\"}, {\"LOWER\": {\"IN\": [\"mg\", \"g\", \"ml\"]}}]\n", "]\n", "\n", "matcher = Matcher(nlp.vocab)\n", "matcher.add(\"DRUG_DOSE\", patterns)\n", "\n", "for transcription in med_adj['transcription']:\n", " doc = nlp(transcription)\n", " matches = matcher(doc)\n", " for match_id, start, end in matches:\n", " string_id = nlp.vocab.strings[match_id]\n", " span = doc[start:end]\n", " print(string_id, start, end, span.text)\n" ] 
}, { "cell_type": "code", "execution_count": 34, "id": "815b1041", "metadata": {}, "outputs": [], "source": [ "med_adj = med.sample(n=3, replace = False, random_state=42)" ] }, { "cell_type": "code", "execution_count": null, "id": "824246e5", "metadata": {}, "outputs": [], "source": [ "# Let's load our pretrained spacy model\n", "\n", "nlp = spacy.load(\"en_core_med7_lg\")\n", "\n", "# this function will extract relevant entities and labels needed from medical transcription \n", "\n", "def extract_keywords(text):\n", " doc = nlp(text)\n", " entities = []\n", " entities = [(ent.text, ent.label_) for ent in doc.ents]\n", " return entities\n", "\n", "# Lets define our categories\n", "surgery_keywords = [\"surgery\", \"operation\", \"procedure\", \"acute Cholangitis\", \"surgisis\", \"appendicitis\"]\n", "cardio_pul_keywords = [\"heart\", \"cardiovascular\", \"pulmonary\", \"lungs\"]\n", "orthopaedic_keywords = [\"orthopaedic\", \"bone\", \"joint\", \"fracture\"]\n", "neurology_keywords = [\"neurology\", \"nervours system\", \"brain\", \"nerve\"]\n", "general_med_keywords = [\"patient\", \"complaining\", \"history\", \"medical\"]\n", " \n", "# This will process each medical description and check for relevant keywords\n", "medical_doc = med['transcription']\n", "for transcription in medical_doc:\n", " entities = extract_keywords(transcription.lower())\n", " \n", " is_surgery = any(keyword in transcription.lower() for keyword in surgery_keywords)\n", " is_cardio_pul = any(keyword in transcription.lower() for keyword in cardio_pul_keywords)\n", " is_orthopaedic = any(keyword in transcription.lower() for keyword in orthopaedic_keywords)\n", " is_neurology = any(keyword in transcription.lower() for keyword in neurology_keywords)\n", " is_general_med = any(keyword in transcription.lower() for keyword in general_med_keywords)\n", " \n", " print(\"Transcription:\", transcription)\n", " print(\"Entities:\", entities)\n", " print(\"Is Surgery:\", is_surgery)\n", " print(\"Is 
Cardio Pulmonary:\", is_cardio_pul)\n", " print(\"Orthopaedic:\", is_orthopaedic)\n", " print(\"Neurology:\", is_neurology)\n", " print(\"General Medicine:\", is_general_med)" ] }, { "cell_type": "code", "execution_count": null, "id": "dcdbcd44", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.13" } }, "nbformat": 4, "nbformat_minor": 5 }