from transformers import AutoTokenizer, AutoModelForTokenClassification
import torch
import hazm

# NOTE: bert-fa-base-uncased is a base checkpoint, so the token-classification
# head loaded here is randomly initialized; in practice, point model_name at a
# checkpoint fine-tuned for diacritization. num_labels=4 matches the label
# scheme assumed below (0 = no diacritic, 1 = fatha, 2 = kasra, 3 = damma).
model_name = "HooshvareLab/bert-fa-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name, num_labels=4)
model.eval()

def add_diacritics(text):
    # Normalize the text and split it into words with hazm.
    normalizer = hazm.Normalizer()
    text = normalizer.normalize(text)
    words = hazm.word_tokenize(text)

    # Tokenize the pre-split words. BERT may break a word into several
    # subword tokens, so the encoding's word_ids mapping is used below to
    # realign predictions with words.
    inputs = tokenizer(words, return_tensors="pt", is_split_into_words=True)

    # Run the model without tracking gradients; the logits hold one row of
    # label scores per subword token.
    with torch.no_grad():
        outputs = model(**inputs).logits

    # One predicted label id per subword token, including [CLS] and [SEP].
    predictions = torch.argmax(outputs, dim=2).tolist()[0]

    # Realign token-level predictions with the original words: keep the
    # prediction of the first subword of each word and skip special tokens,
    # whose word_id is None. Zipping words with predictions directly would
    # drift as soon as any word were split into subwords.
    word_ids = inputs.word_ids()
    word_predictions = {}
    for token_idx, word_id in enumerate(word_ids):
        if word_id is not None and word_id not in word_predictions:
            word_predictions[word_id] = predictions[token_idx]

    # Assumed label scheme: 1 = fatha, 2 = kasra, 3 = damma; any other label
    # leaves the word unchanged.
    diacritics = {1: 'َ', 2: 'ِ', 3: 'ُ'}
    result = []
    for word_id, word in enumerate(words):
        prediction = word_predictions.get(word_id, 0)
        if prediction in diacritics:
            word += diacritics[prediction]
        result.append(word)

final_text = " ".join(result) |
|
final_text = final_text.replace(" ،", "،").replace(" .", ".").replace(" ؛", "؛") |
|
|
|
return final_text |
|
|
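
# A minimal usage sketch. The sample sentence is illustrative, and the output
# is only meaningful once the model above is a checkpoint actually fine-tuned
# for diacritization; with a randomly initialized head the predicted labels
# are arbitrary.
if __name__ == "__main__":
    sample = "کتاب خوب است."  # a short Persian sentence: "The book is good."
    print(add_diacritics(sample))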