# -*- coding: utf-8 -*-
"""using_dataset_hugginface.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
"""
"""**Hugginface loggin for push on Hub**"""
###
#
#   Bibliography used:
# https://huggingface.co/learn/nlp-course/chapter5/5
#
###
import os
import time
import math
from huggingface_hub import login
from datasets import load_dataset, concatenate_datasets
from functools import reduce
from pathlib import Path
import pandas as pd
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
HF_TOKEN = ''
DATASET_TO_LOAD = 'bigbio/distemist'
DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
DATASET_SOURCE_ID = '9'
# Log in to Hugging Face
login(token = HF_TOKEN)
dataset_CODING = load_dataset(DATASET_TO_LOAD)
royalListOfCode = {}
issues_path = 'dataset'
tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")
#Read current path
path = Path(__file__).parent.absolute()
#print (dataset_CODING)
# with open( str(path) + os.sep + 'ICD-O-3_valid-codes.txt',encoding='utf8') as file:
# """
# # Build a dictionary with ICD-O-3 associated with
#     # healthcare problems
# """
# linesInFile = file.readlines()
# for iLine in linesInFile:
# listOfData = iLine.split('\t')
# code = listOfData[0]
# description = reduce(lambda a, b: a + " "+ b, listOfData[1:2], "")
# royalListOfCode[code.strip()] = description.strip()
# def getCodeDescription(labels_of_type, royalListOfCode):
# """
# Search description associated with some code
# in royalListOfCode
# """
# classification = []
# for iValue in labels_of_type:
# if iValue in royalListOfCode.keys():
# classification.append(royalListOfCode[iValue])
# return classification
# # raw_text: Text associated with the document, question, clinical case, or other type of information.
# # topic: (can be healthcare_treatment, healthcare_diagnosis, a topic, an answer to a question, or empty, e.g. for open text)
# # speciality: (medical specialty the raw_text relates to, e.g. cardiology, surgery, others)
# # raw_text_type: (can be clinical case, open_text, question)
# # topic_type: (can be medical_topic, medical_diagnostic, answer, natural_medicine_topic, other, or empty)
# # source: Identifier of the source associated with the document, as listed in the README and dataset description.
# # country: Identifier of the country of origin of the source (e.g. ch, es) using the ISO 3166-1 alpha-2 standard (two-letter country codes).
cantemistDstDict = {
    'raw_text': '',
    'topic': '',
    'speciallity': '',
    'raw_text_type': 'clinic_case',
    'topic_type': '',
    'source': DATASET_SOURCE_ID,
    'country': 'es',
    'document_id': ''
}
totalOfTokens = 0
corpusToLoad = []
countCopySeveralDocument = 0
counteOriginalDocument = 0
#print (dataset_CODING['train'][5]['entities'])
# for item in dataset_CODING['train']:
# for passage in item['passages']:
# print ("Keys " + str( passage.keys()))
# print("Clinical case type " + str(passage['text']))
for iDataset in dataset_CODING:
    for item in dataset_CODING[iDataset]:
        for passageItem in item['passages']:
            #print ("Element in dataset")
            idFile = passageItem['id'] + '_' + str(iDataset)
            text = passageItem['text'][0]
            # Find the topic or diagnostic classification for the text (left empty in this version)
            counteOriginalDocument += 1
            listOfTokens = tokenizer.tokenize(text)
            currentSizeOfTokens = len(listOfTokens)
            totalOfTokens += currentSizeOfTokens
            newCorpusRow = cantemistDstDict.copy()
            #print('Current text has ', currentSizeOfTokens)
            #print('Total of tokens is ', totalOfTokens)
            newCorpusRow['raw_text'] = text
            newCorpusRow['document_id'] = str(idFile)
            corpusToLoad.append(newCorpusRow)

df = pd.DataFrame.from_records(corpusToLoad)

if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
    os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")

df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
print(
    f"Downloaded all the documents for {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)

print('Number of original documents in the dataset: ', counteOriginalDocument)
print('Number of duplicated documents in the dataset: ', countCopySeveralDocument)
print('Total number of tokens in the dataset: ', totalOfTokens)
file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
size = file.stat().st_size
print('File size in kilobytes (KB):', size >> 10)
print('File size in megabytes (MB):', size >> 20)
print('File size in gigabytes (GB):', size >> 30)
# Once the documents are saved locally we can load them back as a Hugging Face dataset
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")
## Merge the local dataset with the dataset already published on the Hub
try:
    spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
    spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
    spanish_dataset = local_spanish_dataset
spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
print(spanish_dataset)
# Augmenting the dataset
# Important: if an element already exists in DATASET_TO_UPDATE we must update it
# in the list and check for repeated elements (see the deduplication sketch below).
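# A minimal sketch of that deduplication step, assuming 'document_id' uniquely
# identifies a document and that keeping the first occurrence is acceptable;
# the helper below is illustrative and not part of the original pipeline.
def deduplicate_by_document_id(dataset):
    """Drop rows whose 'document_id' has already been seen."""
    seen_ids = set()

    def keep_first(example):
        # Keep a row only the first time its document_id appears.
        if example['document_id'] in seen_ids:
            return False
        seen_ids.add(example['document_id'])
        return True

    return dataset.filter(keep_first)

# Example usage (commented out so the push above is left unchanged):
# spanish_dataset = deduplicate_by_document_id(spanish_dataset)
# spanish_dataset.push_to_hub(DATASET_TO_UPDATE)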