# -*- coding: utf-8 -*-
"""using_dataset_hugginface.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
"""
"""**Hugginface loggin for push on Hub**"""
###
#
# Bibliography used:
# https://huggingface.co/learn/nlp-course/chapter5/5
#
###
import os
import time
import math
from huggingface_hub import login
from datasets import load_dataset, concatenate_datasets
from functools import reduce
from pathlib import Path
import pandas as pd
import numpy as np
# Load the tokenizer directly
from transformers import AutoTokenizer
HF_TOKEN = ''
DATASET_TO_LOAD = 'spanish_health_output.json'
DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
BAD_CHAIN = [
'es como usted puede verificarlo',
'Un sitio oficial del Gobierno de Estados Unidos',
'lo en sitios web oficiales y seguros.',
'forma segura a un sitio web .gov. Comparta informaci',
'Gobierno de Estados Unidos.',
'pertenece a una organizaci',
'(\r\n \n ) o ',
'Un sitio\r\n'
]
# Log in to Hugging Face
login(token = HF_TOKEN)
royalListOfCode = {}
issues_path = 'dataset'
tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")
DATASET_SOURCE_ID = '2'
# Resolve the directory containing this script
path = Path(__file__).parent.absolute()
dataset_CODING = pd.read_json(str(path) + os.sep + DATASET_TO_LOAD, encoding="utf8")
# raw_text: Text associated with the document, question, clinical case, or other type of information.
# topic: (can be healthcare_treatment, healthcare_diagnosis, a topic, the answer to a question, or empty, e.g. for open text)
# speciality: (medical specialty related to the raw_text, e.g. cardiology, surgery, others)
# raw_text_type: (can be clinical case, open_text, question)
# topic_type: (can be medical_topic, medical_diagnostic, answer, natural_medicine_topic, other, or empty)
# source: Identifier of the source associated with the document, as it appears in the README and the dataset description.
# country: Identifier of the country of origin of the source (e.g. ch, es) using the ISO 3166-1 alpha-2 standard (two-letter country codes).
cantemistDstDict = {
'raw_text': '',
'topic': '',
'speciallity': '',
'raw_text_type': 'open_text',
'topic_type': 'other',
'source': DATASET_SOURCE_ID,
'country': 'es',
'document_id': ''
}
def getExtraTexInformation(item, data_top_columname):
    # Concatenate the values of every column that is not one of the main text
    # columns, skipping empty or missing values. pd.notna is used instead of
    # np.isnan so that non-numeric cells (e.g. strings) do not raise a TypeError.
    optionalTag = ["Healthtopics Name", "titles", "subtitles", "paragraphs"]
    text = ""
    for key in data_top_columname:
        if key not in optionalTag:
            if pd.notna(item[key]) and len(str(item[key])) > 1:
                text += str(item[key]) + '\n'
    return text
totalOfTokens = 0
corpusToLoad = []
countCopySeveralDocument = 0
counteOriginalDocument = 0
# Column names of the loaded dataset
data_top_columname = dataset_CODING.columns
def verifyRepetelyChain(paragraph):
    # Drop a paragraph entirely if it contains any of the known boilerplate strings.
    return '' if any(paragraph.find(x) != -1 for x in BAD_CHAIN) else paragraph
for index, item in dataset_CODING.iterrows():
    if len(item['paragraphs']) > 1:
        # Join the paragraphs, replacing boilerplate paragraphs with ''.
        text = reduce(lambda a, b: verifyRepetelyChain(a) + "\n "+ verifyRepetelyChain(b), item['paragraphs'], "")
    else:
        text = getExtraTexInformation(item, data_top_columname)
    # Find the topic or diagnostic classification associated with the text
    counteOriginalDocument += 1
    newCorpusRow = cantemistDstDict.copy()
    #print('Current text has ', currentSizeOfTokens)
    #print('Total of tokens is ', totalOfTokens)
    listOfTokens = []
    try:
        listOfTokens = tokenizer.tokenize(text)
    except Exception as e:
        raise Exception(f'Tokenization error on document {counteOriginalDocument}') from e
    currentSizeOfTokens = len(listOfTokens)
    totalOfTokens += currentSizeOfTokens
    newCorpusRow['topic'] = item['Healthtopics Name'] if item['Healthtopics Name'] else reduce(lambda a, b: a + "\n "+ b, item['titles'], "")
    newCorpusRow['raw_text'] = text
    idFile = counteOriginalDocument
    newCorpusRow['document_id'] = str(idFile)
    corpusToLoad.append(newCorpusRow)
df = pd.DataFrame.from_records(corpusToLoad)
if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
print(
    f"Processed all the documents from {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)
print('Number of documents in the dataset: ', counteOriginalDocument)
print('Number of duplicated documents in the dataset: ', countCopySeveralDocument)
print('Total number of tokens in the dataset: ', totalOfTokens)
file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
size = file.stat().st_size
print('File size in kibibytes (KiB):', size >> 10)
print('File size in mebibytes (MiB):', size >> 20)
print('File size in gibibytes (GiB):', size >> 30)
# Once the JSONL file is written we can load it locally as a Hugging Face dataset
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")
## Merge the local dataset with the dataset already on the Hub
try:
    spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
    new_spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
    # Re-raise so the failure stays visible instead of pushing an incomplete dataset.
    print('<== Exception while loading the remote dataset ==>')
    raise
#new_spanish_dataset = local_spanish_dataset
new_spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
print(new_spanish_dataset)
# Augmenting the dataset
# Important: if elements already exist on DATASET_TO_UPDATE we must update them
# in the list and check whether there are repeated elements; see the sketch below.
#spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
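# --- Illustrative sketch (not part of the original pipeline) -------------------
# The note above says that elements already present on DATASET_TO_UPDATE should
# be updated and checked for repetitions. One possible way to drop exact
# duplicates before pushing is sketched below; it assumes 'raw_text' is a
# suitable uniqueness key, which may not hold for every source.
from datasets import Dataset

def deduplicateByColumn(dataset, key="raw_text"):
    # Convert to pandas, drop rows whose `key` value repeats, and rebuild a
    # datasets.Dataset without carrying over the pandas index.
    deduped_df = dataset.to_pandas().drop_duplicates(subset=[key])
    return Dataset.from_pandas(deduped_df, preserve_index=False)

# Example usage (commented out so the script's behaviour is unchanged):
#deduped_spanish_dataset = deduplicateByColumn(new_spanish_dataset)
#deduped_spanish_dataset.push_to_hub(DATASET_TO_UPDATE)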