import os

# doctr reads USE_TORCH at import time, so set it before importing doctr.
os.environ['USE_TORCH'] = '1'

import gradio as gr
from doctr.io import DocumentFile
from doctr.models import ocr_predictor, from_hub
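
# Load the Tifinagh handwriting recognition model from the Hugging Face Hub and
# pair it with doctr's default pretrained text-detection model.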
reco_model = from_hub('ayymen/crnn_mobilenet_v3_large_gen_hw')
predictor = ocr_predictor(reco_arch=reco_model, pretrained=True)
title = "Tifinagh OCR"
description = "Upload an image to get the OCR results!"
def ocr(img):
    # Gradio hands us a PIL image; save it to disk so doctr can load it.
    img.save("out.jpg")
    doc = DocumentFile.from_images("out.jpg")
    output = predictor(doc)

    # Concatenate the recognised words: one text line per line, a blank line
    # between blocks, and two blank lines between pages.
    res = ""
    for page in output.pages:
        for block in page.blocks:
            for line in block.lines:
                for word in line.words:
                    res += " " + word.value
                res += "\n"
            res += "\n\n"

    # "w" mode already truncates the file, so no separate clearing step is needed.
    _output_name = "RESULT_OCR.txt"
    with open(_output_name, "w", encoding="utf-8", errors="ignore") as f:
        f.write(res)
        print("Writing into file")

    return res, _output_name
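
# Build the Gradio UI: an image input, text and downloadable-file outputs,
# and a few example images from the Examples/ folder.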
demo = gr.Interface(
    fn=ocr,
    inputs=gr.Image(type="pil"),
    outputs=["text", "file"],
    title=title,
    description=description,
    examples=[["Examples/Book.png"], ["Examples/News.png"], ["Examples/Manuscript.jpg"], ["Examples/Files.jpg"]],
)
demo.launch(debug=True)