import cv2
import numpy as np
import mediapipe as mp
import gradio as gr

mp_selfie_segmentation = mp.solutions.selfie_segmentation

# Background color used for everything outside the person mask (white)
BG_COLOR = (255, 255, 255)


def selfie_segmentation(image):
    # Apply the model to the input image
    with mp_selfie_segmentation.SelfieSegmentation(model_selection=0) as segmenter:
        # The Gradio input is configured as a filepath, so read the file first
        image = cv2.imread(image)

        # Convert the BGR image to RGB before processing.
        results = segmenter.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

        # Broadcast the single-channel mask to 3 channels and threshold it:
        # True where the model considers a pixel part of the person.
        condition = np.stack((results.segmentation_mask,) * 3, axis=-1) > 0.1

        # Solid background image of the same size as the input
        bg_image = np.zeros(image.shape, dtype=np.uint8)
        bg_image[:] = BG_COLOR

        # Gradio expects an RGB array, so convert the foreground before compositing
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Keep the person pixels, replace the rest with the background color
        return np.where(condition, image, bg_image)


# Build a Gradio interface around the segmentation function; the image input
# is passed as a filepath so cv2.imread can load it.
iface = gr.Interface(
    fn=selfie_segmentation,
    inputs=gr.Image(type="filepath"),
    outputs="image",
)

# Launch the interface
iface.launch()
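
# Optional: a minimal local sanity check, as a sketch ("person.jpg" and
# "segmented.png" are hypothetical filenames). Because iface.launch() blocks,
# run these lines in place of it when debugging without the web UI:
#
#   result = selfie_segmentation("person.jpg")
#   cv2.imwrite("segmented.png", cv2.cvtColor(result, cv2.COLOR_RGB2BGR))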