# Suppress verbose transformers logging and the model-agnostic max_length warning.
from transformers.utils import logging
logging.set_verbosity_error()

import warnings
warnings.filterwarnings("ignore", message="Using the model-agnostic default `max_length`")

import os
import gradio as gr
from transformers import pipeline

# Helpers used below: auto_detect_language_code, source_lang_dict, get_target_languages.
from language_directions import *

# Image-captioning pipeline used to describe the uploaded image.
caption_pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")


def translate(input_text, source, target):
    """Translate input_text from source to target using a Helsinki-NLP OPUS-MT model."""
    source_readable = source
    if source == "Auto Detect" or source.startswith("Detected"):
        source, _ = auto_detect_language_code(input_text)
    if source in source_lang_dict:
        source = source_lang_dict[source]
    target_lang_dict, _ = get_target_languages(source)
    try:
        target = target_lang_dict[target]
        # e.g. Helsinki-NLP/opus-mt-en-sq
        model = f"Helsinki-NLP/opus-mt-{source}-{target}"
        translation_pipe = pipeline("translation", model=model)
        translation = translation_pipe(input_text)
        return translation[0]['translation_text'], ""
    except KeyError:
        return "", (
            f"Error: Translation direction {source_readable} to {target} "
            "is not supported by the Helsinki-NLP translation models"
        )


def launch(image):
    """Caption the uploaded image, then translate the caption from English to Albanian."""
    out = caption_pipe(image)
    caption = out[0]['generated_text']
    translation, error = translate(caption, 'en', 'sq')
    return translation if translation else error


iface = gr.Interface(launch, inputs=gr.Image(type='pil'), outputs="text")
iface.launch(share=True)
# iface.launch(share=True,
#              server_port=int(os.environ['PORT1']))
iface.close()
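
# Usage note (illustrative only, not part of the app flow): translate() can also be
# called directly. The exact keys accepted for `source` and `target` depend on the
# language_directions module, which is not shown here, so "Albanian" below is an
# assumed human-readable key; "Auto Detect" is handled explicitly in translate().
#
#     text, err = translate("Hello, world!", "Auto Detect", "Albanian")
#     print(text if text else err)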