import gradio as gr from transformers import AutoProcessor, AutoModelForCausalLM import spaces import io import base64 # Adicionando a biblioteca base64 para decodificação from PIL import Image import subprocess # Instalando a dependência flash-attn se necessário subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True) # Carregando o modelo e o processador model_id = 'J-LAB/Florence-vl3' model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).to("cuda").eval() processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True) DESCRIPTION = "# Product Describe by Fluxi IA\n### Base Model [Florence-2] (https://huggingface.co/microsoft/Florence-2-large)" @spaces.GPU def run_example(task_prompt, image): inputs = processor(text=task_prompt, images=image, return_tensors="pt").to("cuda") generated_ids = model.generate( input_ids=inputs["input_ids"], pixel_values=inputs["pixel_values"], max_new_tokens=1024, early_stopping=False, do_sample=False, num_beams=3, ) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0] parsed_answer = processor.post_process_generation( generated_text, task=task_prompt, image_size=(image.width, image.height) ) return parsed_answer # Função para processar imagens, agora suportando Base64 def process_image(image, task_prompt): # Verifica se a imagem é uma string base64 if isinstance(image, str) and image.startswith("data:image"): # Extraindo a parte base64 da string base64_image = image.split(",")[1] # Decodificando a imagem base64 image = Image.open(io.BytesIO(base64.b64decode(base64_image))) elif isinstance(image, bytes): image = Image.open(io.BytesIO(image)) else: image = Image.fromarray(image) # Convertendo um array NumPy para imagem PIL, se aplicável # Mapeando os prompts de tarefas if task_prompt == 'Product Caption': task_prompt = '' elif task_prompt == 'OCR': task_prompt = '' # Chamando o exemplo com a imagem processada e o prompt da tarefa results = run_example(task_prompt, image) # Extraindo o texto gerado a partir dos resultados if results and task_prompt in results: output_text = results[task_prompt] else: output_text = "" # Convertendo quebras de linha para quebras de linha HTML output_text = output_text.replace("\n\n", "

").replace("\n", "
") return output_text css = """ #output { overflow: auto; border: 1px solid #ccc; padding: 10px; background-color: rgb(31 41 55); color: #fff; } """ js = """ function adjustHeight() { var outputElement = document.getElementById('output'); outputElement.style.height = 'auto'; // Reset height to auto to get the actual content height var height = outputElement.scrollHeight + 'px'; // Get the scrollHeight outputElement.style.height = height; // Set the height } // Attach the adjustHeight function to the click event of the submit button document.querySelector('button').addEventListener('click', function() { setTimeout(adjustHeight, 500); // Adjust the height after a small delay to ensure content is loaded }); """ single_task_list = ['Product Caption', 'OCR'] with gr.Blocks(css=css) as demo: gr.Markdown(DESCRIPTION) with gr.Tab(label="Product Image Select"): with gr.Row(): with gr.Column(): input_img = gr.Image(label="Input Picture", source="upload", type="pil") # Suporte a PIL images task_prompt = gr.Dropdown(choices=single_task_list, label="Task Prompt", value="Product Caption") submit_btn = gr.Button(value="Submit") with gr.Column(): output_text = gr.HTML(label="Output Text", elem_id="output") gr.Markdown(""" ## How to use via API To use this model via API, you can follow the example code below: ```python import base64 from PIL import Image import io import requests # Converting image to base64 image_path = 'path_to_image.png' with open(image_path, 'rb') as image_file: image_base64 = base64.b64encode(image_file.read()).decode('utf-8') # Preparing the payload payload = { "image": f"data:image/png;base64,{image_base64}", "task_prompt": "Product Caption" } response = requests.post("http://your-space-url-here", json=payload) print(response.json()) ``` """) submit_btn.click(process_image, [input_img, task_prompt], [output_text]) demo.load(lambda: None, inputs=None, outputs=None, js=js) demo.launch(debug=True)