# from flair.data import Sentence
# from flair.models import SequenceTagger
# import streamlit as st
# # load tagger
# tagger = SequenceTagger.load("flair/ner-english-large")
# # make example sentence
# text=st.text_area("Enter the text to detect its named entities")
# sentence = Sentence(text)
# # predict NER tags
# tagger.predict(sentence)
# # print sentence
# print(sentence)
# # print predicted NER spans
# print('The following NER tags are found:')
# # iterate over entities and print
# for entity in sentence.get_spans('ner'):
#     print(entity)
import easyocr
import cv2
import numpy as np
import re
import requests
import streamlit as st
from PIL import Image
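# Streamlit app: upload a business card image, run EasyOCR on it, draw the detected
# text regions on the card, and display the annotated image.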
## Image uploading function ##
def image_upload_and_ocr(reader):
    uploaded_file = st.file_uploader(label=':red[**Please upload a business card** :sunglasses:]', type=['jpeg', 'jpg', 'png', 'webp'])
    if uploaded_file is not None:
        image = Image.open(uploaded_file)
        image = image.resize((640, 480))
        image = np.array(image)           # EasyOCR and OpenCV expect a NumPy array, not a PIL image
        result = reader.readtext(image)   # list of (bounding box, text, confidence) tuples
        result2 = result                  # keep the raw detections for drawing
        texts = [item[1] for item in result]
        result = ' '.join(texts)          # all detected text joined into a single string
        return result, result2, image
    return None, None, None
### DRAWING DETECTION FUNCTION ###
def drawing_detection(image, detections):
    # Draw bounding boxes around the detected text regions
    for detection in detections:
        # Extract the bounding box coordinates
        points = detection[0]  # list of four corner points defining the bounding box
        x1, y1 = int(points[0][0]), int(points[0][1])  # top-left corner
        x2, y2 = int(points[2][0]), int(points[2][1])  # bottom-right corner
        # Draw the bounding box
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
        # Add the detected text above the box
        text = detection[1]
        cv2.putText(image, text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    st.image(image, caption='Detected text on the card', width=710)
    return image
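# Note: a Streamlit script re-runs top to bottom on every interaction, so loading the
# EasyOCR reader at module level reloads the model each time; wrapping it in
# st.cache_resource would be one way to avoid that (a suggestion, not in the original code).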
# Load the EasyOCR reader
reader = easyocr.Reader(['en'])
st.title("_Business_ card data extractor using OpenCV and Streamlit :sunglasses:")
result, result2, image = image_upload_and_ocr(reader)
if result2 is not None:
    drawing_image = drawing_detection(image, result2)
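# A minimal sketch (not part of the original app) of how the joined OCR text could be
# parsed into structured fields with the `re` module imported above; the patterns are
# illustrative assumptions, not a tested extraction pipeline.
if result:
    email = re.search(r'[\w.+-]+@[\w-]+\.[\w.-]+', result)
    phone = re.search(r'\+?\d[\d ()-]{7,}\d', result)
    st.write('Email:', email.group() if email else 'not found')
    st.write('Phone:', phone.group() if phone else 'not found')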