# OCR-Translator / app.py
import gradio as gr
import ObjCharRec        # project module: PaddleOCR-based text extraction (ocr_with_paddle)
from deep_translator import GoogleTranslator
import markdown as md    # project module holding the HTML snippets rendered below (not the PyPI "markdown" package)
import translate_speak   # project module: translation + text-to-speech helpers
import base64
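
# Languages supported by GoogleTranslator: `langs_list` is the plain list of names and
# `langs_dict` maps names to language codes (roughly {"afrikaans": "af", ..., "zulu": "zu"});
# the dict feeds the target-language dropdown in the Translator tab.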
langs_list = GoogleTranslator().get_supported_languages()
langs_dict = GoogleTranslator().get_supported_languages(as_dict=True)
def encode_image(image_path):
    """Return the file at `image_path` as a base64-encoded UTF-8 string."""
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
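
# The encoded strings below are interpolated into the HTML snippets in the local
# `markdown` module, presumably as data URIs along the lines of
#   <img src="data:image/png;base64,{}">
# (the actual templates live in that module and are not shown here).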
# Encode the images
github_logo_encoded = encode_image("Images/github-logo.png")
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
website_logo_encoded = encode_image("Images/ai-logo.png")
usecase_img_encoded = encode_image("Images/UML/Usecase.png")
class_img_encoded = encode_image("Images/UML/class.png")
object_img_encoded = encode_image("Images/UML/object.png")
sequence_img_encoded = encode_image("Images/UML/sequence.png")
component_img_encoded = encode_image("Images/UML/component.png")
colab_img_encoded = encode_image("Images/UML/colab.png")
activity_img_encoded = encode_image("Images/UML/activity.png")
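
# Build the Gradio UI: a single Blocks app with three tabs (Intro, ⭐Translator, Simple OCR)
# and a custom footer.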
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Noto Sans")]), css='footer {visibility: hidden}') as main_interface:
    gr.Markdown("# Welcome to The Linguistic Lens 👓🗣️")
    with gr.Tabs():
        with gr.TabItem("Intro"):
            gr.HTML(md.description)
            gr.HTML(md.usecase_diagram.format(usecase_img_encoded))
            gr.HTML(md.class_diagram.format(class_img_encoded))
            gr.HTML(md.object_diagram.format(object_img_encoded))
            gr.HTML(md.sequence_diagram.format(sequence_img_encoded))
            gr.HTML(md.colab_diagram.format(colab_img_encoded))
            gr.HTML(md.activity_diagram.format(activity_img_encoded))
            gr.HTML(md.component_diagram.format(component_img_encoded))
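
        # ⭐Translator tab: upload an image, run OCR to get text plus spoken audio, then
        # translate the text into the selected language and speak the translation.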
        with gr.TabItem("⭐Translator"):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        image_input = gr.Image(label="Upload Image")
                    with gr.Row():
                        clear_btn = gr.ClearButton()
                        submit_btn = gr.Button("Submit")
                with gr.Column():
                    with gr.Row():
                        output_text = gr.Text(label="Output")
                        audio_out = gr.Audio(label="Streamed Audio")
                    lang_drop = gr.Dropdown(langs_dict, label="language", interactive=True)
                    translate_btn = gr.Button("Translate")
                    with gr.Row():
                        translated_txt = gr.Text(label="translated text")
                        translated_out = gr.Audio(label="Streamed Audio")
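
            # Event wiring. Assumed from the output lists (both functions live in project modules):
            # ObjCharRec.ocr_with_paddle(image) returns (extracted_text, audio), and
            # translate_speak.translate_txt(lang, text) returns (translated_text, audio).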
            submit_btn.click(fn=ObjCharRec.ocr_with_paddle, inputs=image_input, outputs=[output_text, audio_out])
            translate_btn.click(fn=translate_speak.translate_txt, inputs=[lang_drop, output_text],
                                outputs=[translated_txt, translated_out])
            clear_btn.click(lambda: [None] * 5, outputs=[image_input, output_text, translated_txt, translated_out, audio_out])
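
        # Simple OCR tab: image in, extracted text out (no translation or audio); note that
        # ocr_with_paddle is reused here but wired to a single output component.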
        with gr.TabItem("Simple OCR"):
            gr.Markdown("Paddle OCR")
            with gr.Row():
                with gr.Column():
                    image_input = gr.Image(label="Upload Image")
                    with gr.Row():
                        clear_btn = gr.ClearButton()
                        submit_btn = gr.Button("Submit")
                output_text = gr.Text(label="Output")
            submit_btn.click(fn=ObjCharRec.ocr_with_paddle, inputs=image_input, outputs=output_text)
            clear_btn.click(lambda: [None] * 2, outputs=[image_input, output_text])
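
    # Footer with the base64-encoded GitHub, LinkedIn, and website logos.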
    gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))
    # gr.Markdown(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))

if __name__ == "__main__":
    main_interface.launch()