Spaces:
Sleeping
Sleeping
File size: 904 Bytes
229899d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 |
import torch
from transformers import BertTokenizer, BertForQuestionAnswering
# Load the pre-trained model and tokenizer
# NOTE(review): both use the same checkpoint name; 'bert-base-uncased' is a
# general-purpose LM head-less checkpoint — the QA head is randomly initialized
# unless a fine-tuned QA checkpoint is substituted. Confirm this is intended.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForQuestionAnswering.from_pretrained("bert-base-uncased")
def answer_query(question, context):
    """Answer `question` by extracting a span from `context` with BERT QA.

    Args:
        question: The question string.
        context: The passage to search for the answer.

    Returns:
        The decoded answer span as a string (may be empty if the predicted
        end index does not lie after the start index).
    """
    # Tokenize question + context into a single model input
    inputs = tokenizer(question, context, return_tensors="pt")
    # Inference only — no gradients needed
    with torch.no_grad():
        outputs = model(**inputs)
    # Per-token scores for the span start and end positions
    start_logits = outputs.start_logits
    end_logits = outputs.end_logits
    # Most likely span boundaries; +1 makes the end index exclusive for slicing
    answer_start = torch.argmax(start_logits)
    answer_end = torch.argmax(end_logits) + 1
    # BUG FIX: the original sliced the raw context STRING with token indices
    # (and called convert_tokens_to_string on a plain string). The span
    # indices address the tokenized sequence, so slice the input ids and
    # decode those tokens instead.
    answer_ids = inputs["input_ids"][0][answer_start:answer_end]
    answer = tokenizer.decode(answer_ids, skip_special_tokens=True)
    return answer
|