# image_editing/app.py
from transformers import MarianMTModel, MarianTokenizer
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
import gradio as gr
from PIL import Image
import random

# Check whether a GPU is available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the InstructPix2Pix model (half precision on GPU; float32 on CPU, where fp16 is not supported)
model_id = "timbrooks/instruct-pix2pix"
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)  # Move the pipeline to the selected device
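# Optional memory saver: pipe.enable_attention_slicing() trades a little speed for lower
# GPU memory usage on smaller GPUs; left disabled here.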
# Load the translation model (from Arabic to English)
translation_model_name = 'Helsinki-NLP/opus-mt-ar-en'
translation_tokenizer = MarianTokenizer.from_pretrained(translation_model_name)
translation_model = MarianMTModel.from_pretrained(translation_model_name)
# Initialize a random seed
seed = random.randint(0, 10000)
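# The seed is shared by both tools below and only changes when the user presses the
# "تغيير النمط" (change style) button, so repeated edits stay reproducible.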

# Function to reset the seed (style change)
def change_style():
    global seed
    # Draw a new integer seed (torch.manual_seed returns a Generator object, so keep a plain int here)
    seed = random.randint(0, 10000)
    return f"تم تغيير النمط. المعرف الجديد: {seed}"  # "The style has been changed. New seed: {seed}"
# Dictionary to map Arabic colors to English
arabic_to_english_colors = {
    "أبيض": "White",
    "أسود": "Black",
    "أزرق": "Blue",
    "أخضر": "Green",
    "أحمر": "Red",
    "أصفر": "Yellow",
    "رمادي": "Gray",
    "برتقالي": "Orange",
    "بنفسجي": "Purple",
    "وردي": "Pink",
    "بني": "Brown",
    "كحلي": "Navy",
    "زهري": "Coral",
    "فيروزي": "Teal",
    "بيج": "Beige"
}

# Translate an Arabic color name to English and repaint the walls with it
def change_color(image, color):
    # Translate the Arabic color to English using the dictionary
    color_in_english = arabic_to_english_colors.get(color, None)

    # If the color is not in the dictionary, raise an error (the output component is an image,
    # so returning an error string would not be displayed correctly)
    if not color_in_english:
        raise gr.Error(f"اللون '{color}' غير موجود في القائمة. يرجى إدخال لون صحيح.")  # "The color '{color}' is not in the list. Please enter a valid color."

    # Construct the editing prompt in English
    prompt = f"paint the walls with {color_in_english} color"

    # Text CFG (guidance_scale) controls how strongly the model follows the prompt
    text_cfg = 7.5

    # Image CFG (image_guidance_scale) controls how closely the result preserves the input image
    image_cfg = 1.5

    # Apply the edit with InstructPix2Pix
    edited_image = pipe(
        prompt=prompt,
        image=image,
        num_inference_steps=70,            # Number of diffusion steps
        guidance_scale=text_cfg,           # Text CFG: prompt adherence
        image_guidance_scale=image_cfg,    # Image CFG: fidelity to the input image
        generator=torch.manual_seed(seed)  # Fixed seed for reproducible results
    ).images[0]

    return edited_image
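# Example: change_color(room_image, "أزرق") builds the prompt
# "paint the walls with Blue color" and returns the repainted room image.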

# Gradio interface for wall-color editing (Arabic, right-to-left layout)
def image_interface():
    with gr.Blocks(css=".gradio-container {direction: rtl}") as demo_color:
        gr.Markdown("## تطبيق لتغيير لون الجدران")  # "Wall color changing app"

        # Image upload ("Upload a picture of the room")
        image_input = gr.Image(type="pil", label="قم برفع صورة للغرفة")

        # Common paint colors in Arabic (must match the keys of arabic_to_english_colors)
        common_colors = [
            "أبيض", "أسود", "أزرق", "أخضر", "أحمر", "أصفر",
            "رمادي", "برتقالي", "بنفسجي", "وردي", "بني",
            "كحلي", "زهري", "فيروزي", "بيج"
        ]

        # Dropdown for the wall color ("Choose the wall color")
        color_input = gr.Dropdown(common_colors, label="اختر لون الجدران")

        # Output image ("Edited image")
        result_image = gr.Image(label="الصورة المعدلة")

        # Button to apply the wall-color transformation ("Change the wall color")
        submit_button = gr.Button("قم بتغيير لون الجدران")

        # On click, pass the uploaded image and the selected color to change_color
        submit_button.click(fn=change_color, inputs=[image_input, color_input], outputs=result_image)

    return demo_color

# Function to translate an Arabic prompt to English
def translate_prompt(prompt_ar):
    translated_tokens = translation_tokenizer(prompt_ar, return_tensors="pt", truncation=True)
    translated = translation_model.generate(**translated_tokens)
    prompt_en = translation_tokenizer.decode(translated[0], skip_special_tokens=True)
    return prompt_en
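# Example (exact wording depends on the MarianMT model):
#   translate_prompt("اجعل الجو مثلج")  ->  approximately "Make the weather snowy"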

# General image editing function
def edit_image(image, instruction_ar):
    # Translate the Arabic instruction to English
    instruction_en = translate_prompt(instruction_ar)

    # Text CFG (guidance_scale) controls how strongly the model follows the prompt
    text_cfg = 12.0

    # Image CFG (image_guidance_scale) controls how closely the result preserves the input image
    image_cfg = 1.5

    # Apply the edit using InstructPix2Pix with the translated prompt
    edited_image = pipe(
        prompt=instruction_en,
        image=image,
        num_inference_steps=70,            # Number of diffusion steps
        guidance_scale=text_cfg,           # Text CFG: prompt adherence
        image_guidance_scale=image_cfg,    # Image CFG: fidelity to the input image
        generator=torch.manual_seed(seed)  # Fixed seed for reproducible results
    ).images[0]

    return edited_image
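# Example: edit_image(img, "اجعل الجو مثلج") first translates the instruction to English
# and then applies it to img with InstructPix2Pix.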

# Gradio interface for general image editing (Arabic, right-to-left layout)
def general_editing_interface():
    with gr.Blocks(css=".gradio-container {direction: rtl}") as demo_general:
        gr.Markdown("## تطبيق تحرير الصور العام")  # "General image editing app"

        # Image upload ("Upload an image")
        image_input = gr.Image(type="pil", label="قم بتحميل صورة")

        # Textbox for the instruction ("Enter the instructions"; placeholder: "Describe the edits (e.g. 'make the weather snowy')")
        instruction_input = gr.Textbox(label="أدخل التعليمات", placeholder="وصف التعديلات (مثل: 'اجعل الجو مثلج')")

        # Output image ("Edited image")
        result_image = gr.Image(label="الصورة المعدلة")

        # Button to apply the transformation ("Apply the edits")
        submit_button = gr.Button("تطبيق التعديلات")

        # Button to change the seed / style ("Change the style")
        change_style_button = gr.Button("تغيير النمط")

        # Output for the seed-change message ("Style info")
        seed_output = gr.Textbox(label="معلومات النمط", interactive=False)

        # Wire the buttons to their callbacks
        submit_button.click(fn=edit_image, inputs=[image_input, instruction_input], outputs=result_image)
        change_style_button.click(fn=change_style, outputs=seed_output)

    return demo_general

# Build both Gradio apps
color_app = image_interface()
general_editing_app = general_editing_interface()

# Combine them into a single tabbed app
with gr.Blocks(css=".gradio-container {direction: rtl}") as combined_demo:
    gr.Markdown("## اختر التطبيق")  # "Choose the app"

    with gr.Tab("تطبيق تحرير الصور"):  # "Image editing app"
        general_editing_app.render()

    with gr.Tab("تطبيق تغيير لون الطلاء"):  # "Paint color changing app"
        color_app.render()

# Launch the combined Gradio app
combined_demo.launch()
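# When running locally, combined_demo.launch(share=True) would additionally expose a
# temporary public URL; on Hugging Face Spaces the plain launch() above is sufficient.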