import json import logging from functools import partial from typing import Any, Optional, Tuple import gradio as gr from pie_modules.document.processing import tokenize_document from pie_modules.documents import TokenDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions from pie_modules.models import * # noqa: F403 from pie_modules.taskmodules import * # noqa: F403 from pytorch_ie import Pipeline from pytorch_ie.annotations import LabeledSpan from pytorch_ie.auto import AutoPipeline from pytorch_ie.documents import TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions from pytorch_ie.models import * # noqa: F403 from pytorch_ie.taskmodules import * # noqa: F403 from rendering_utils import render_displacy, render_pretty_table from transformers import AutoModel, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer logger = logging.getLogger(__name__) RENDER_WITH_DISPLACY = "displaCy + highlighted arguments" RENDER_WITH_PRETTY_TABLE = "Pretty Table" DEFAULT_MODEL_NAME = "ArneBinder/sam-pointer-bart-base-v0.3" DEFAULT_MODEL_REVISION = "76300f8e534e2fcf695f00cb49bba166739b8d8a" # local path # DEFAULT_MODEL_NAME = "models/dataset-sciarg/task-ner_re/v0.3/2024-05-28_23-33-46" # DEFAULT_MODEL_REVISION = None DEFAULT_EMBEDDING_MODEL_NAME = "allenai/scibert_scivocab_uncased" def embed_text_annotations( document: TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, text_layer_name: str, ) -> dict: # to not modify the original document document = document.copy() # tokenize_document does not yet consider predictions, so we need to add them manually document[text_layer_name].extend(document[text_layer_name].predictions.clear()) added_annotations = [] # TODO: set return_overflowing_tokens=True and max_length=...? tokenizer_kwargs = {} tokenized_documents = tokenize_document( document, tokenizer=tokenizer, result_document_type=TokenDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions, partition_layer="labeled_partitions", added_annotations=added_annotations, **tokenizer_kwargs, ) # just tokenize again to get tensors in the correct format for the model model_inputs = tokenizer(document.text, return_tensors="pt", **tokenizer_kwargs) assert len(model_inputs.encodings) == len(tokenized_documents) model_output = model(**model_inputs) # get embeddings for all text annotations embeddings = {} for batch_idx in range(len(model_output.last_hidden_state)): text2tok_ann = added_annotations[batch_idx][text_layer_name] tok2text_ann = {v: k for k, v in text2tok_ann.items()} for tok_ann in tokenized_documents[batch_idx].labeled_spans: # skip "empty" annotations if tok_ann.start == tok_ann.end: continue # use the max pooling strategy to get a single embedding for the annotation text embedding = model_output.last_hidden_state[batch_idx, tok_ann.start : tok_ann.end].max( dim=0 )[0] text_ann = tok2text_ann[tok_ann] if text_ann in embeddings: logger.warning( f"Overwriting embedding for annotation '{text_ann}' (do you use striding?)" ) embeddings[text_ann] = embedding return embeddings def predict( text: str, pipeline: Pipeline, embedding_model: Optional[PreTrainedModel] = None, embedding_tokenizer: Optional[PreTrainedTokenizer] = None, ) -> Tuple[dict, str]: document = TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions(text=text) # add single partition from the whole text (the model only considers text in partitions) document.labeled_partitions.append(LabeledSpan(start=0, end=len(text), label="text")) # execute prediction pipeline pipeline(document) document_dict = document.asdict() if embedding_model is not None and embedding_tokenizer is not None: adu_embeddings = embed_text_annotations( document=document, model=embedding_model, tokenizer=embedding_tokenizer, text_layer_name="labeled_spans", ) # convert keys to str because JSON keys must be strings adu_embeddings_dict = {str(k._id): v.detach().tolist() for k, v in adu_embeddings.items()} document_dict["embeddings"] = adu_embeddings_dict else: gr.Warning( "No embedding model provided. Skipping embedding extraction. You can load an embedding model in the 'Model Configuration' section." ) # Return as dict and JSON string. The latter is required because the JSON component converts floats # to ints which destroys de-serialization of the document (the scores of the annotations need to be floats) return document_dict, json.dumps(document_dict) def render(document_txt: str, render_with: str, render_kwargs_json: str) -> str: document_dict = json.loads(document_txt) # remove embeddings from document_dict to make it de-serializable document_dict.pop("embeddings", None) document = TextDocumentWithLabeledSpansBinaryRelationsAndLabeledPartitions.fromdict( document_dict ) render_kwargs = json.loads(render_kwargs_json) if render_with == RENDER_WITH_PRETTY_TABLE: html = render_pretty_table(document, **render_kwargs) elif render_with == RENDER_WITH_DISPLACY: html = render_displacy(document, **render_kwargs) else: raise ValueError(f"Unknown render_with value: {render_with}") return html def add_to_index( output_txt: str, doc_id: str, processed_documents: dict, vector_store: Any ) -> None: try: if doc_id in processed_documents: gr.Warning(f"Document {doc_id} already in index. Overwriting.") output = json.loads(output_txt) # get the embeddings from the output and remove them from the output embeddings = output.pop("embeddings") # save the processed document to the index processed_documents[doc_id] = output # save the embeddings to the vector store for adu_id, embedding in embeddings.items(): emb_id = f"{doc_id}:{adu_id}" # TODO: save embedding to vector store at emb_id (embedding is a list of 768 floats) gr.Info( f"Added document {doc_id} to index (index contains {len(processed_documents)} entries). (NOT YET IMPLEMENTED)" ) except Exception as e: raise gr.Error(f"Failed to add document {doc_id} to index: {e}") def open_accordion(): return gr.Accordion(open=True) def close_accordion(): return gr.Accordion(open=False) def load_argumentation_model(model_name: str, revision: Optional[str] = None) -> Pipeline: try: model = AutoPipeline.from_pretrained( model_name, device=-1, num_workers=0, taskmodule_kwargs=dict(revision=revision), model_kwargs=dict(revision=revision), ) except Exception as e: raise gr.Error(f"Failed to load argumentation model: {e}") gr.Info(f"Loaded argumentation model: model_name={model_name}, revision={revision})") return model def load_embedding_model(model_name: str) -> Tuple[PreTrainedModel, PreTrainedTokenizer]: try: embedding_model = AutoModel.from_pretrained(model_name) embedding_tokenizer = AutoTokenizer.from_pretrained(model_name) except Exception as e: raise gr.Error(f"Failed to load embedding model: {e}") gr.Info(f"Loaded embedding model: model_name={model_name})") return embedding_model, embedding_tokenizer def load_models( model_name: str, revision: Optional[str] = None, embedding_model_name: Optional[str] = None ) -> Tuple[Pipeline, Optional[PreTrainedModel], Optional[PreTrainedTokenizer]]: argumentation_model = load_argumentation_model(model_name, revision) embedding_model = None embedding_tokenizer = None if embedding_model_name is not None and embedding_model_name.strip(): embedding_model, embedding_tokenizer = load_embedding_model(embedding_model_name) return argumentation_model, embedding_model, embedding_tokenizer def main(): example_text = "Scholarly Argumentation Mining (SAM) has recently gained attention due to its potential to help scholars with the rapid growth of published scientific literature. It comprises two subtasks: argumentative discourse unit recognition (ADUR) and argumentative relation extraction (ARE), both of which are challenging since they require e.g. the integration of domain knowledge, the detection of implicit statements, and the disambiguation of argument structure. While previous work focused on dataset construction and baseline methods for specific document sections, such as abstract or results, full-text scholarly argumentation mining has seen little progress. In this work, we introduce a sequential pipeline model combining ADUR and ARE for full-text SAM, and provide a first analysis of the performance of pretrained language models (PLMs) on both subtasks. We establish a new SotA for ADUR on the Sci-Arg corpus, outperforming the previous best reported result by a large margin (+7% F1). We also present the first results for ARE, and thus for the full AM pipeline, on this benchmark dataset. Our detailed error analysis reveals that non-contiguous ADUs as well as the interpretation of discourse connectors pose major challenges and that data annotation needs to be more consistent." print("Loading argumentation model ...") argumentation_model = load_argumentation_model( model_name=DEFAULT_MODEL_NAME, revision=DEFAULT_MODEL_REVISION ) default_render_kwargs = { "entity_options": { # we need to convert the keys to uppercase because the spacy rendering function expects them in uppercase "colors": { "own_claim".upper(): "#009933", "background_claim".upper(): "#99ccff", "data".upper(): "#993399", } }, "colors_hover": { "selected": "#ffa", # "tail": "#aff", "tail": { # green "supports": "#9f9", # red "contradicts": "#f99", # do not highlight "parts_of_same": None, }, "head": None, # "#faf", "other": None, }, } # TODO: setup the vector store vector_store = None with gr.Blocks() as demo: processed_documents_state = gr.State(dict()) vector_store_state = gr.State(vector_store) # wrap the pipeline and the embedding model/tokenizer in a tuple to avoid that it gets called models_state = gr.State((argumentation_model, None, None)) with gr.Row(): with gr.Column(scale=1): doc_id = gr.Textbox( label="Document ID", value="user_input", ) text = gr.Textbox( label="Text", lines=20, value=example_text, ) with gr.Accordion("Model Configuration", open=False): model_name = gr.Textbox( label="Model Name", value=DEFAULT_MODEL_NAME, ) model_revision = gr.Textbox( label="Model Revision", value=DEFAULT_MODEL_REVISION, ) embedding_model_name = gr.Textbox( label=f"Embedding Model Name (e.g. {DEFAULT_EMBEDDING_MODEL_NAME})", value="", ) load_models_btn = gr.Button("Load Models") load_models_btn.click( fn=load_models, inputs=[model_name, model_revision, embedding_model_name], outputs=models_state, ) predict_btn = gr.Button("Analyse") output_txt = gr.Textbox(visible=False) add_to_index_btn = gr.Button("Add current result to Index") with gr.Column(scale=1): with gr.Accordion("See plain result ...", open=False) as output_accordion: output_json = gr.JSON(label="Model Output") with gr.Accordion("Render Options", open=False): render_as = gr.Dropdown( label="Render with", choices=[RENDER_WITH_PRETTY_TABLE, RENDER_WITH_DISPLACY], value=RENDER_WITH_DISPLACY, ) render_kwargs = gr.Textbox( label="Render Arguments", lines=5, value=json.dumps(default_render_kwargs, indent=2), ) render_btn = gr.Button("Re-render") rendered_output = gr.HTML(label="Rendered Output") render_button_kwargs = dict( fn=render, inputs=[output_txt, render_as, render_kwargs], outputs=rendered_output ) def _predict( text: str, models: Tuple[Pipeline, Optional[PreTrainedModel], Optional[PreTrainedTokenizer]], ) -> Tuple[dict, str]: return predict(text, *models) predict_btn.click(open_accordion, inputs=[], outputs=[output_accordion]).then( fn=_predict, inputs=[text, models_state], outputs=[output_json, output_txt], api_name="predict", ).success(**render_button_kwargs).success( close_accordion, inputs=[], outputs=[output_accordion] ) render_btn.click(**render_button_kwargs, api_name="render") add_to_index_btn.click( fn=add_to_index, inputs=[output_txt, doc_id, processed_documents_state, vector_store_state], outputs=[], ) js = """ () => { function maybeSetColor(entity, colorAttributeKey, colorDictKey) { var color = entity.getAttribute('data-color-' + colorAttributeKey); // if color is a json string, parse it and use the value at colorDictKey try { const colors = JSON.parse(color); color = colors[colorDictKey]; } catch (e) {} if (color) { console.log('setting color', color); console.log('entity', entity); entity.style.backgroundColor = color; entity.style.color = '#000'; } } function highlightRelationArguments(entityId) { const entities = document.querySelectorAll('.entity'); // reset all entities entities.forEach(entity => { const color = entity.getAttribute('data-color-original'); entity.style.backgroundColor = color; entity.style.color = ''; }); if (entityId !== null) { var visitedEntities = new Set(); // highlight selected entity const selectedEntity = document.getElementById(entityId); if (selectedEntity) { const label = selectedEntity.getAttribute('data-label'); maybeSetColor(selectedEntity, 'selected', label); visitedEntities.add(selectedEntity); } // highlight tails const relationTailsAndLabels = JSON.parse(selectedEntity.getAttribute('data-relation-tails')); relationTailsAndLabels.forEach(relationTail => { const tailEntity = document.getElementById(relationTail['entity-id']); if (tailEntity) { const label = relationTail['label']; maybeSetColor(tailEntity, 'tail', label); visitedEntities.add(tailEntity); } }); // highlight heads const relationHeadsAndLabels = JSON.parse(selectedEntity.getAttribute('data-relation-heads')); relationHeadsAndLabels.forEach(relationHead => { const headEntity = document.getElementById(relationHead['entity-id']); if (headEntity) { const label = relationHead['label']; maybeSetColor(headEntity, 'head', label); visitedEntities.add(headEntity); } }); // highlight other entities entities.forEach(entity => { if (!visitedEntities.has(entity)) { const label = entity.getAttribute('data-label'); maybeSetColor(entity, 'other', label); } }); } } const entities = document.querySelectorAll('.entity'); entities.forEach(entity => { const alreadyHasListener = entity.getAttribute('data-has-listener'); if (alreadyHasListener) { return; } entity.addEventListener('mouseover', () => { highlightRelationArguments(entity.id); }); entity.addEventListener('mouseout', () => { highlightRelationArguments(null); }); entity.setAttribute('data-has-listener', 'true'); }); } """ rendered_output.change(fn=None, js=js, inputs=[], outputs=[]) demo.launch() if __name__ == "__main__": # configure logging logging.basicConfig() main()