# Svg-Tracer-New / app.py
import gradio as gr
from PIL import Image, ImageFilter
import numpy as np
import io
import tempfile
import vtracer
from skimage import feature, filters, morphology
import cv2
from rembg import remove
import torch
from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation

# Load an optional Hugging Face segmentation model.
# NOTE: "facebook/dino-vitb16" (referenced here originally) is a self-supervised ViT
# backbone without a segmentation head and cannot be loaded through the segmentation
# auto classes; a SegFormer checkpoint is assumed instead so the
# "Hugging Face Segmentation Model" option is actually usable.
segmentation_model = AutoModelForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b0-finetuned-ade-512-512"
)
segmentation_processor = AutoImageProcessor.from_pretrained(
    "nvidia/segformer-b0-finetuned-ade-512-512"
)
def preprocess_image(image, blur_radius, sharpen_radius, noise_reduction, detail_level,
                     edge_method, color_quantization, enhance_with_ai, remove_bg):
    """Advanced preprocessing of the image before vectorization."""
    try:
        if blur_radius > 0:
            image = image.filter(ImageFilter.GaussianBlur(blur_radius))
        if sharpen_radius > 0:
            image = image.filter(ImageFilter.UnsharpMask(radius=sharpen_radius, percent=150, threshold=3))
        if noise_reduction > 0:
            image_np = np.array(image)
            image_np = cv2.fastNlMeansDenoisingColored(image_np, None, h=noise_reduction,
                                                       templateWindowSize=7, searchWindowSize=21)
            image = Image.fromarray(image_np)
        if detail_level > 0:
            sigma = max(0.5, 3.0 - (detail_level * 0.5))
            image_np = np.array(image.convert('L'))
            if edge_method == 'Canny':
                edges = feature.canny(image_np, sigma=sigma)
            elif edge_method == 'Sobel':
                edges = filters.sobel(image_np)
            elif edge_method == 'Scharr':
                edges = filters.scharr(image_np)
            else:  # Prewitt
                edges = filters.prewitt(image_np)
            edges = morphology.dilation(edges, morphology.square(max(1, 6 - detail_level)))
            edges_img = Image.fromarray((edges * 255).astype(np.uint8))
            image = Image.blend(image.convert('RGB'), edges_img.convert('RGB'), alpha=0.5)
        if color_quantization > 0:
            image = quantize_colors(image, color_quantization)
        if enhance_with_ai:
            # rembg isolates the subject; used here as a rough AI clean-up pass
            # that strips background clutter before vectorization.
            image_np = np.array(image)
            image_np = remove(image_np)
            image = Image.fromarray(image_np)
        if remove_bg:
            image_np = np.array(image)
            image_np = remove(image_np)
            image = Image.fromarray(image_np)
    except Exception as e:
        print(f"Error during preprocessing: {e}")
        raise
    return image
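
# quantize_colors() is called from preprocess_image() but is not defined elsewhere
# in this file; the minimal sketch below is an assumption, using PIL's built-in
# median-cut quantizer to reduce the palette to `num_colors` colors.
def quantize_colors(image, num_colors):
    """Reduce the image palette to at most `num_colors` colors."""
    quantized = image.convert('RGB').quantize(colors=int(num_colors))
    return quantized.convert('RGB')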
def vectorize_with_hf_model(image):
    """Runs the Hugging Face segmentation model and returns a grayscale
    segmentation map for vtracer. Adapted to SegFormer-style outputs
    (per-class logits); these models do not expose a "masks" key directly."""
    inputs = segmentation_processor(images=image.convert('RGB'), return_tensors="pt")
    with torch.no_grad():
        outputs = segmentation_model(**inputs)
    # SegFormer emits per-class logits at 1/4 resolution; upsample to the input size.
    logits = torch.nn.functional.interpolate(
        outputs.logits, size=image.size[::-1], mode="bilinear", align_corners=False
    )
    labels = logits.argmax(dim=1)[0].cpu().numpy().astype(np.float32)
    # Spread class ids over 0-255 so distinct regions survive vectorization.
    if labels.max() > 0:
        labels = labels / labels.max() * 255.0
    return Image.fromarray(labels.astype(np.uint8))
def convert_image(image, blur_radius, sharpen_radius, noise_reduction, detail_level, edge_method, color_quantization,
                  color_mode, hierarchical, mode, filter_speckle, color_precision, layer_difference,
                  corner_threshold, length_threshold, max_iterations, splice_threshold, path_precision,
                  enhance_with_ai, remove_bg, model_choice):
    """Convert an image to SVG using vtracer with customizable and advanced parameters."""
    # Preprocess the image with the additional detail-level settings
    image = preprocess_image(image, blur_radius, sharpen_radius, noise_reduction, detail_level,
                             edge_method, color_quantization, enhance_with_ai, remove_bg)

    # If a specific model is chosen, use it to process the image before vectorization
    if model_choice == "Hugging Face Segmentation Model":
        image = vectorize_with_hf_model(image)

    # Convert the PIL image to PNG bytes for vtracer
    img_byte_array = io.BytesIO()
    image.save(img_byte_array, format='PNG')
    img_bytes = img_byte_array.getvalue()

    try:
        # Perform the conversion
        svg_str = vtracer.convert_raw_image_to_svg(
            img_bytes,
            img_format='png',
            colormode=color_mode.lower(),
            hierarchical=hierarchical.lower(),
            mode=mode.lower(),
            filter_speckle=int(filter_speckle),
            color_precision=int(color_precision),
            layer_difference=int(layer_difference),
            corner_threshold=int(corner_threshold),
            length_threshold=float(length_threshold),
            max_iterations=int(max_iterations),
            splice_threshold=int(splice_threshold),
            path_precision=int(path_precision)
        )

        # Save the SVG string to a temporary file for the download link
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.svg')
        temp_file.write(svg_str.encode('utf-8'))
        temp_file.close()

        # vtracer already returns a complete <svg> element, so embed it directly
        return svg_str, temp_file.name
    except Exception as e:
        print(f"Error during vectorization: {e}")
        return f"Error: {e}", None
# Gradio interface
iface = gr.Blocks()

with iface:
    gr.Markdown("# Super-Advanced Image to SVG Converter with Enhanced Models")

    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload Image")
        blur_radius_input = gr.Slider(minimum=0, maximum=10, value=0, step=0.1, label="Blur Radius (for smoothing)")
        sharpen_radius_input = gr.Slider(minimum=0, maximum=5, value=0, step=0.1, label="Sharpen Radius")
        noise_reduction_input = gr.Slider(minimum=0, maximum=30, value=0, step=1, label="Noise Reduction")
        enhance_with_ai_input = gr.Checkbox(label="AI Edge Enhance", value=False)
        remove_bg_input = gr.Checkbox(label="Remove Background", value=False)

    with gr.Row():
        detail_level_input = gr.Slider(minimum=0, maximum=10, value=5, step=1, label="Detail Level")
        edge_method_input = gr.Radio(choices=["Canny", "Sobel", "Scharr", "Prewitt"], value="Canny", label="Edge Detection Method")
        # Minimum is 0 so the default value of 0 ("disable") is actually selectable
        color_quantization_input = gr.Slider(minimum=0, maximum=64, value=0, step=2, label="Color Quantization (0 to disable)")
    with gr.Row():
        color_mode_input = gr.Radio(choices=["Color", "Binary"], value="Color", label="Color Mode")
        hierarchical_input = gr.Radio(choices=["Stacked", "Cutout"], value="Stacked", label="Hierarchical")
        mode_input = gr.Radio(choices=["Spline", "Polygon", "None"], value="Spline", label="Mode")

    with gr.Row():
        filter_speckle_input = gr.Slider(minimum=1, maximum=100, value=4, step=1, label="Filter Speckle")
        color_precision_input = gr.Slider(minimum=1, maximum=100, value=6, step=1, label="Color Precision")
        layer_difference_input = gr.Slider(minimum=1, maximum=100, value=16, step=1, label="Layer Difference")

    with gr.Row():
        corner_threshold_input = gr.Slider(minimum=1, maximum=100, value=60, step=1, label="Corner Threshold")
        length_threshold_input = gr.Slider(minimum=1, maximum=100, value=4.0, step=0.5, label="Length Threshold")
        max_iterations_input = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Max Iterations")

    with gr.Row():
        splice_threshold_input = gr.Slider(minimum=1, maximum=100, value=45, step=1, label="Splice Threshold")
        path_precision_input = gr.Slider(minimum=1, maximum=100, value=8, step=1, label="Path Precision")
        model_choice_input = gr.Radio(choices=["None", "Hugging Face Segmentation Model"], value="None", label="Choose Model")

    convert_button = gr.Button("Convert Image to SVG")
    svg_output = gr.HTML(label="SVG Output")
    download_output = gr.File(label="Download SVG")

    convert_button.click(
        fn=convert_image,
        inputs=[
            image_input, blur_radius_input, sharpen_radius_input, noise_reduction_input, detail_level_input, edge_method_input, color_quantization_input,
            color_mode_input, hierarchical_input, mode_input, filter_speckle_input, color_precision_input,
            layer_difference_input, corner_threshold_input, length_threshold_input, max_iterations_input,
            splice_threshold_input, path_precision_input, enhance_with_ai_input, remove_bg_input, model_choice_input
        ],
        outputs=[svg_output, download_output]
    )

iface.launch()