Extractive Question Answering application.
import re
from transformers import pipeline
qa = pipeline(
    "question-answering",
    model="raphaelsty/carbonblog",
    tokenizer="raphaelsty/carbonblog",
)
def clean(document):
    """Pre-process the document."""
    document = re.sub(r"[^a-zA-Z0-9 \n\.]", " ", document)
    document = re.sub(r"\s\s+", " ", document)
    # The [NONE] prefix allows the model to handle missing components.
    document = f"[NONE] {document.lower()}"
    return document
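# Illustration (derived from the regexes above, not stated in the model card):
# punctuation such as ":", ";" and "%" is replaced by spaces, repeated whitespace
# is collapsed, the text is lower-cased and the [NONE] token is prepended, so
# clean("body: 83% nylon 17% spandex; lace: 86% nylon 14% spandex") returns
# "[NONE] body 83 nylon 17 spandex lace 86 nylon 14 spandex".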
document = clean("body: 83% nylon 17% spandex; lace: 86% nylon 14% spandex")
qa({"question": "What is the 1 component ?", "context": document})
{"score": 0.9999976754188538, "start": 32, "end": 36, "answer": "lace"}