import os
import random
import string
import logging

import gradio as gr
import torch
from transformers import pipeline, set_seed
from transformers import AutoTokenizer, AutoModelForCausalLM

# Monkey patch
import inspect
from gradio import routes
from typing import List, Type
def get_types(cls_set: List[Type], component: str):
    docset = []
    types = []
    if component == "input":
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[1].split(":")[-1])
            types.append(doc_lines[1].split(")")[0].split("(")[-1])
    else:
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[-1].split(":")[-1])
            types.append(doc_lines[-1].split(")")[0].split("(")[-1])
    return docset, types


routes.get_types = get_types
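# Note: the override above replaces routes.get_types, an internal helper that
# the Gradio version this Space targets uses to parse component docstrings for
# its auto-generated API page. This is a workaround against Gradio internals,
# not a public API, and may well break on other Gradio versions.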
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())

# Truthy values for DEBUG: anything starting with "t", "y" or "1" ("true", "yes", ...)
DEBUG = os.environ.get("DEBUG", "false").lower()[0] in "ty1"
HF_AUTH_TOKEN = os.environ.get("HF_AUTH_TOKEN", None)
DEVICE = os.environ.get("DEVICE", "cpu")  # e.g., "cuda:0"
if DEVICE != "cpu" and not torch.cuda.is_available():
    DEVICE = "cpu"
logger.info(f"DEVICE {DEVICE}")
DTYPE = torch.float32 if DEVICE == "cpu" else torch.float16
MODEL_NAME = os.environ.get("MODEL_NAME", "bertin-project/bertin-gpt-j-6B")
MODEL_REVISION = os.environ.get("MODEL_REVISION", "main")
MAX_LENGTH = int(os.environ.get("MAX_LENGTH", 1024))
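# Illustrative launch configuration (example values, set in the Space settings
# or the shell; all of these variables are optional):
#   DEVICE=cuda:0 MODEL_NAME=bertin-project/bertin-gpt-j-6B MAX_LENGTH=1024 python app.py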
HEADER_INFO = """
# BERTIN GPT-J-6B
Spanish BERTIN GPT-J-6B Model.
""".strip()
LOGO = "https://huggingface.co/bertin-project/bertin-roberta-base-spanish/resolve/main/images/bertin.png"
HEADER = f"""
<link href="https://fonts.googleapis.com/css2?family=Roboto:wght@300&display=swap" rel="stylesheet">
<style>
.ltr,
textarea {{
    font-family: Roboto !important;
    text-align: left;
    direction: ltr !important;
}}
.ltr-box {{
    border-bottom: 1px solid #ddd;
    padding-bottom: 20px;
}}
.rtl {{
    text-align: left;
    direction: ltr !important;
}}
span.result-text {{
    padding: 3px 3px;
    line-height: 32px;
}}
span.generated-text {{
    background-color: rgb(118 200 147 / 13%);
}}
</style>
<div align=center>
<img src="{LOGO}" width=150/>

# BERTIN GPT-J-6B

BERTIN proporciona una serie de modelos de lenguaje en Español entrenados en abierto.

Este modelo ha sido entrenado con [Mesh Transformer JAX](https://github.com/kingoflolz/mesh-transformer-jax) en TPUs proporcionadas por Google a través del programa Tensor Research Cloud, a partir del modelo [GPT-J de EleutherAI](https://huggingface.co/EleutherAI/gpt-j-6B) con el corpus [mC4-es-sampled (gaussian)](https://huggingface.co/datasets/bertin-project/mc4-es-sampled). Esta demo funciona sobre una GPU proporcionada por HuggingFace.
</div>
"""
FOOTER = """
<div align=center>
Para más información, visite el repositorio del modelo: <a href="https://huggingface.co/bertin-project/bertin-gpt-j-6B">BERTIN-GPT-J-6B</a>.
<img src="https://visitor-badge.glitch.me/badge?page_id=spaces/bertin-project/bertin-gpt-j-6B"/>
</div>
""".strip()
EXAMPLES = [
    "",
    "Érase una vez,",
    "¿Cuál es la capital de Francia? Respuesta:",
    """Los templos egipcios fueron construidos para el culto oficial de los dioses y la conmemoración de los faraones del Antiguo Egipto en las regiones bajo su dominio. Los templos eran vistos como el hogar de los dioses o faraones deificados a quienes eran dedicados, y en ellos los faraones y el clero egipcio llevaban a cabo diversos rituales, las funciones centrales de la religión egipcia: realizar ofrendas a sus dioses, recrear pasajes mitológicos mediante festivales y protegerse de las fuerzas del caos. Estos rituales eran vistos como necesarios para que los dioses mantuvieran la maat, el orden divino del universo.
El cuidado del hogar de los dioses era obligación de los faraones, que dedicaron ingentes cantidades de recursos para la construcción y el mantenimiento de los templos. Por necesidad, los faraones delegaban la mayoría de los rituales en una amplia casta sacerdotal, aunque la mayor parte del pueblo llano permanecía al margen de la participación directa en las ceremonias por tener prohibido el acceso a las zonas más sagradas de los templos. A pesar de ello, el templo siempre fue un importante centro religioso para todos los egipcios, que iban a ellos a rezar, realizar ofrendas y buscar la guía de los oráculos.
Pregunta: ¿Quién cuidaba del hogar de los dioses?
Respuesta:""",
]
AGENT = "BERTIN"
USER = "ENTREVISTADOR"
CONTEXT = """La siguiente conversación es un extracto de una entrevista a {AGENT} celebrada en Madrid para Radio Televisión Española:
{USER}: Bienvenido, {AGENT}. Un placer tenerlo hoy con nosotros.
{AGENT}: Gracias. El placer es mío."""
class Normalizer:
    def remove_repetitions(self, text):
        """Remove repeated sentences, keeping only first occurrences."""
        first_occurrences = []
        for sentence in text.split("."):
            if sentence not in first_occurrences:
                first_occurrences.append(sentence)
        return ".".join(first_occurrences)

    def trim_last_sentence(self, text):
        """Trim the last sentence if it is incomplete (no final period)."""
        return text[:text.rfind(".") + 1]

    def clean_txt(self, text):
        return self.trim_last_sentence(self.remove_repetitions(text))
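# Illustrative behaviour of the cleaner (not executed by the app):
#   Normalizer().clean_txt("Uno. dos. dos. tres sin punto")
#   -> "Uno. dos."
# The exact-duplicate " dos" sentence is dropped and the unfinished trailing
# sentence is trimmed at the last period.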
class TextGeneration:
    def __init__(self):
        self.tokenizer = None
        self.generator = None
        self.task = "text-generation"
        self.model_name_or_path = MODEL_NAME
        set_seed(42)

    def load(self):
        logger.info("Loading model...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_name_or_path,
            revision=MODEL_REVISION,
            use_auth_token=HF_AUTH_TOKEN,
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_name_or_path,
            revision=MODEL_REVISION,
            use_auth_token=HF_AUTH_TOKEN,
            pad_token_id=self.tokenizer.eos_token_id,
            eos_token_id=self.tokenizer.eos_token_id,
            torch_dtype=DTYPE,
            low_cpu_mem_usage=DEVICE != "cpu",
        ).to(device=DEVICE, non_blocking=False)
        _ = self.model.eval()
        device_number = -1 if DEVICE == "cpu" else int(DEVICE.split(":")[-1])
        self.generator = pipeline(
            self.task, model=self.model, tokenizer=self.tokenizer, device=device_number
        )
        logger.info("Loading model done.")
        # Manual generation alternative, kept for reference:
        # with torch.no_grad():
        #     tokens = self.tokenizer.encode(prompt, return_tensors="pt").to(device=DEVICE, non_blocking=True)
        #     gen_tokens = self.model.generate(tokens, do_sample=True, temperature=0.8, max_length=128)
        #     generated = self.tokenizer.batch_decode(gen_tokens)[0]
        #     return generated
    def generate(self, text, generation_kwargs, previous_text=None):
        input_text = previous_text or text
        # Work on a copy: "do_clean" is this app's own flag (not a valid
        # generation argument for the pipeline), and "max_length" is adjusted
        # per call, so the caller's dict must not be mutated.
        generation_kwargs = generation_kwargs.copy()
        do_clean = generation_kwargs.pop("do_clean", False)
        max_length = len(self.tokenizer(input_text)["input_ids"]) + generation_kwargs["max_length"]
        generation_kwargs["max_length"] = min(max_length, self.model.config.n_positions)
        generated_text = None
        if input_text:
            for _ in range(10):
                generated_text = self.generator(
                    input_text,
                    **generation_kwargs,
                )[0]["generated_text"]
                if generated_text.strip().startswith(input_text):
                    generated_text = generated_text.replace(input_text, "", 1).strip()
                if do_clean:
                    generated_text = cleaner.clean_txt(generated_text)
                if generated_text:
                    if previous_text and previous_text != text:
                        diff = [
                            (text, None),
                            (previous_text.replace(text, " ", 1).strip(), " "),
                            (generated_text, AGENT),
                        ]
                    else:
                        diff = [(text, None), (generated_text, AGENT)]
                    return (
                        input_text + " " + generated_text,
                        diff,
                    )
            if not generated_text:
                return (
                    "",
                    [("Tras 10 intentos BERTIN no generó nada. Pruebe cambiando las opciones.", "ERROR")],
                )
        return (
            "",
            [("Debe escribir algo primero.", "ERROR")],
        )
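    # Illustrative return shape (not executed by the app):
    #   generate("Érase una vez,", {"max_length": 50, ...})
    #   -> ("Érase una vez, <texto generado>",
    #       [("Érase una vez,", None), ("<texto generado>", "BERTIN")])
    # The list of (span, label) tuples feeds gr.HighlightedText below.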
# Caching decorators left over from what appears to be an earlier Streamlit
# version of this demo, kept for reference:
# @st.cache(hash_funcs={torch.nn.parameter.Parameter: lambda _: None})
# @st.cache(allow_output_mutation=True)
# @st.cache(allow_output_mutation=True, hash_funcs={TextGeneration: lambda _: None})
def load_text_generator():
    text_generator = TextGeneration()
    text_generator.load()
    return text_generator


cleaner = Normalizer()
generator = load_text_generator()
def complete_with_gpt(text, max_length, top_k, top_p, temperature, do_sample, do_clean):
    generation_kwargs = {
        "max_length": max_length,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": do_sample,
        "do_clean": do_clean,
    }
    return generator.generate(text, generation_kwargs)


def expand_with_gpt(hidden, text, max_length, top_k, top_p, temperature, do_sample, do_clean):
    generation_kwargs = {
        "max_length": max_length,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": do_sample,
        "do_clean": do_clean,
    }
    return generator.generate(text, generation_kwargs, previous_text=hidden)
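# Both callbacks return (new_hidden_text, highlighted_diff): "Generar" starts
# from the textbox contents alone, while "Añadir" continues from the text
# accumulated in the hidden state, so repeated clicks extend the same passage.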
def chat_with_gpt(agent, user, context, user_message, history, max_length, top_k, top_p, temperature, do_sample, do_clean):
    # agent = AGENT
    # user = USER
    generation_kwargs = {
        "max_length": 25,  # chat replies are kept short regardless of the slider value
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": do_sample,
        "do_clean": do_clean,
        # "num_return_sequences": 1,
        # "return_full_text": False,
    }
    message = user_message.split(" ", 1)[0].capitalize() + " " + user_message.split(" ", 1)[-1]
    history = history or []  # [(f"{user}: Bienvenido. Encantado de tenerle con nosotros.", f"{agent}: Un placer, muchas gracias por la invitación.")]
    context = context.format(USER=user or USER, AGENT=agent or AGENT).strip()
    if context[-1] not in ".:":
        context += "."
    context_length = len(context.split())
    history_take = 0
    history_context = "\n".join(
        f"{user}: {history_message.capitalize()}.\n{agent}: {history_response}."
        for history_message, history_response in history[-len(history) + history_take:]
    )
    # Drop the oldest turns until the prompt fits in the model's context window
    while len(history_context.split()) > generator.model.config.n_positions - (generation_kwargs["max_length"] + context_length):
        history_take += 1
        history_context = "\n".join(
            f"{user}: {history_message.capitalize()}.\n{agent}: {history_response}."
            for history_message, history_response in history[-len(history) + history_take:]
        )
        if history_take >= generator.model.config.n_positions:
            break
    context += history_context
    for _ in range(5):
        prompt = f"{context}\n\n{user}: {message}.\n"
        response = generator.generate(prompt, generation_kwargs)[0]
        if DEBUG:
            print("\n-----\n" + response + "\n-----\n")
        # response = response.split("\n")[-1]
        # if agent in response and response.split(agent)[-1]:
        #     response = response.split(agent)[-1]
        # if user in response and response.split(user)[-1]:
        #     response = response.split(user)[-1]
        # Take the first response
        candidates = [
            r for r in response.replace(prompt, "").split(f"{AGENT}:") if r.strip()
        ]
        response = candidates[0].split(USER)[0].replace(f"{AGENT}:", "\n").strip() if candidates else ""
        if response and response[0] in string.punctuation:
            response = response[1:].strip()
        if response.strip().startswith(f"{user}: {message}"):
            response = response.strip().split(f"{user}: {message}")[-1]
        if response.replace(".", "").strip() and message.replace(".", "").strip() != response.replace(".", "").strip():
            break
    if DEBUG:
        print()
        print("CONTEXT:")
        print(context)
        print()
        print("MESSAGE:")
        print(message)
        print()
        print("RESPONSE:")
        print(response)
    if not response.strip():
        response = random.choice([
            "No sé muy bien cómo contestar a eso.",
            "No estoy seguro.",
            "Prefiero no contestar.",
            "Ni idea.",
            "¿Podemos cambiar de tema?",
        ])
    history.append((user_message, response))
    return history, history, ""
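# For reference, the chat prompt assembled above looks roughly like this
# (illustrative, using the default AGENT/USER names):
#
#   La siguiente conversación es un extracto de una entrevista a BERTIN ...
#   ENTREVISTADOR: Bienvenido, BERTIN. Un placer tenerlo hoy con nosotros.
#   BERTIN: Gracias. El placer es mío.
#   ENTREVISTADOR: <turno previo>.
#   BERTIN: <respuesta previa>.
#
#   ENTREVISTADOR: <mensaje nuevo>.
#
# The model's continuation after "BERTIN:" is parsed out as the reply.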
with gr.Blocks(css="#htext span {white-space: pre}") as demo:
    gr.Markdown(HEADER)
    with gr.Row():
        with gr.Group():
            with gr.Box():
                gr.Markdown("Opciones")
                max_length = gr.Slider(
                    label="Longitud máxima",
                    # help="Número máximo (aproximado) de palabras a generar.",
                    minimum=1,
                    maximum=MAX_LENGTH,
                    value=50,
                    step=1,
                )
                top_k = gr.Slider(
                    label="Top-k",
                    # help="Número de palabras con alta probabilidad a mantener para el filtrado `top-k`",
                    minimum=40,
                    maximum=80,
                    value=50,
                    step=1,
                )
                top_p = gr.Slider(
                    label="Top-p",
                    # help="Solo las palabras más probables con probabilidades que sumen `top_p` o más se mantienen para la generación.",
                    minimum=0.0,
                    maximum=1.0,
                    value=0.95,
                    step=0.01,
                )
                temperature = gr.Slider(
                    label="Temperatura",
                    # help="Valor utilizado para modular las probabilidades de las siguientes palabras generadas.",
                    minimum=0.1,
                    maximum=10.0,
                    value=0.8,
                    step=0.05,
                )
                do_sample = gr.Checkbox(
                    label="¿Muestrear?",
                    value=True,
                    # help="Si no se muestrea se usará una decodificación voraz (_greedy_).",
                )
                do_clean = gr.Checkbox(
                    label="¿Limpiar texto?",
                    value=True,
                    # help="Si eliminar o no las palabras repetidas y recortar las últimas frases sin terminar.",
                )
        with gr.Column():
            with gr.Tabs():
                with gr.TabItem("Generar"):
                    textbox = gr.Textbox(label="Texto", placeholder="Escriba algo (o seleccione un ejemplo) y pulse 'Generar'...", lines=8)
                    examples = gr.Dropdown(label="Ejemplos", choices=EXAMPLES, value=None, type="value")
                    hidden = gr.Textbox(visible=False, show_label=False)
                    with gr.Box():
                        # output = gr.Markdown()
                        output = gr.HighlightedText(
                            elem_id="htext",
                            label="Resultado",
                            combine_adjacent=True,
                            color_map={AGENT: "green", "ERROR": "red", " ": "blue"},
                        )
                    with gr.Row():
                        generate_btn = gr.Button("Generar")
                        generate_btn.click(
                            complete_with_gpt,
                            inputs=[textbox, max_length, top_k, top_p, temperature, do_sample, do_clean],
                            outputs=[hidden, output],
                        )
                        expand_btn = gr.Button("Añadir")
                        expand_btn.click(
                            expand_with_gpt,
                            inputs=[hidden, textbox, max_length, top_k, top_p, temperature, do_sample, do_clean],
                            outputs=[hidden, output],
                        )
                        edit_btn = gr.Button("Editar", variant="secondary")
                        edit_btn.click(lambda x: (x, "", []), inputs=[hidden], outputs=[textbox, hidden, output])
                        clean_btn = gr.Button("Borrar", variant="secondary")
                        clean_btn.click(lambda: ("", "", [], ""), inputs=[], outputs=[textbox, hidden, output, examples])
                    examples.change(lambda x: x, inputs=[examples], outputs=[textbox])
                with gr.TabItem("Charlar") as tab_chat:
                    tab_chat.select(lambda: 25, inputs=[], outputs=[max_length])
                    context = gr.Textbox(label="Contexto", value=CONTEXT, lines=5)
                    with gr.Row():
                        agent = gr.Textbox(label="Agente", value=AGENT)
                        user = gr.Textbox(label="Usuario", value=USER)
                    history = gr.Variable(value=[])
                    chatbot = gr.Chatbot(color_map=("green", "gray"))
                    with gr.Row():
                        message = gr.Textbox(placeholder="Escriba aquí su mensaje y pulse 'Enviar'", show_label=False)
                        chat_btn = gr.Button("Enviar")
                    chat_btn.click(
                        chat_with_gpt,
                        inputs=[agent, user, context, message, history, max_length, top_k, top_p, temperature, do_sample, do_clean],
                        outputs=[chatbot, history, message],
                    )
    gr.Markdown(FOOTER)
# Hack: register a throwaway Interface so that the monkey-patched API docs
# pick up the right input/output components for the Blocks demo above.
with gr.Interface(lambda: None, inputs=["text", max_length, top_k, top_p, temperature, do_sample, do_clean], outputs=[hidden, output]) as iface:
    demo.examples = None
    demo.predict_durations = []
    demo.input_components = iface.input_components
    demo.output_components = iface.output_components

demo.launch()