VikramSingh178 committed on
Commit
5c75d65
1 Parent(s): 4ff8630

chore: Update inpainting pipeline configuration and parameters

Browse files

Former-commit-id: 954ab0d8f2b70310083bc27d75b1e14dbb5e01d0
Former-commit-id: adb9dc9d2d977e7cac01564d404a7a593a2f4858
Former-commit-id: 3fedb691871dfcf23f718911e3e9063284b8d0b9
Former-commit-id: cf3391c6d9e4778de7150a2e4dec077478844983
Former-commit-id: 79c3a3f8305df1902891a2270e77917a616a8013

Files changed (1) hide show
  1. scripts/inpainting_pipeline.py +0 -35
scripts/inpainting_pipeline.py CHANGED
@@ -5,11 +5,6 @@ from api_utils import accelerator, ImageAugmentation
5
  import hydra
6
  from omegaconf import DictConfig
7
  from PIL import Image
8
- <<<<<<< HEAD
9
-
10
- =======
11
- from functools import lru_cache
12
- >>>>>>> bb1cf42 (chore: Update inpainting pipeline configuration and parameters)
13
 
14
  def load_pipeline(model_name: str, device, enable_compile: bool = True):
15
  pipeline = AutoPipelineForInpainting.from_pretrained(model_name, torch_dtype=torch.float16)
@@ -27,7 +22,6 @@ class AutoPaintingPipeline:
27
  self.mask_image = mask_image
28
  self.target_width = target_width
29
  self.target_height = target_height
30
- <<<<<<< HEAD
31
 
32
  def run_inference(self, prompt: str, negative_prompt: str, num_inference_steps: int, strength: float, guidance_scale: float,num_images):
33
  output = self.pipeline(
@@ -43,25 +37,6 @@ class AutoPaintingPipeline:
43
  width=self.target_width
44
 
45
  ).images[0]
46
- =======
47
- self.pipeline.to(self.device)
48
- self.pipeline.unet = torch.compile(self.pipeline.unet,mode='max-autotune')
49
-
50
-
51
-
52
-
53
- def run_inference(self, prompt: str, negative_prompt: str, num_inference_steps: int, strength: float, guidance_scale: float):
54
- """
55
- Runs the inference on the input image using the inpainting pipeline.
56
-
57
- Returns:
58
- Image: The output image after inpainting.
59
- """
60
-
61
- image = load_image(self.image)
62
- mask_image = load_image(self.mask_image)
63
- output = self.pipeline(prompt=prompt,negative_prompt=negative_prompt,image=image,mask_image=mask_image,num_inference_steps=num_inference_steps,strength=strength,guidance_scale=guidance_scale, height = self.target_height ,width = self.target_width).images[0]
64
- >>>>>>> bb1cf42 (chore: Update inpainting pipeline configuration and parameters)
65
  return output
66
 
67
  @hydra.main(version_base=None, config_path="../configs", config_name="inpainting")
@@ -76,7 +51,6 @@ def inference(cfg: DictConfig):
76
  extended_image = augmenter.extend_image(image)
77
  mask_image = augmenter.generate_mask_from_bbox(extended_image, cfg.segmentation_model, cfg.detection_model)
78
  mask_image = augmenter.invert_mask(mask_image)
79
- <<<<<<< HEAD
80
 
81
  # Create AutoPaintingPipeline instance with cached pipeline
82
  painting_pipeline = AutoPaintingPipeline(
@@ -97,15 +71,6 @@ def inference(cfg: DictConfig):
97
  )
98
 
99
  # Save output and mask images
100
- =======
101
- prompt = cfg.prompt
102
- negative_prompt = cfg.negative_prompt
103
- num_inference_steps = cfg.num_inference_steps
104
- strength = cfg.strength
105
- guidance_scale = cfg.guidance_scale
106
- pipeline = AutoPaintingPipeline(model_name=model_name, image = extended_image, mask_image=mask_image, target_height=cfg.target_height, target_width=cfg.target_width)
107
- output = pipeline.run_inference(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps, strength=strength, guidance_scale=guidance_scale)
108
- >>>>>>> bb1cf42 (chore: Update inpainting pipeline configuration and parameters)
109
  output.save(f'{cfg.output_path}/output.jpg')
110
  mask_image.save(f'{cfg.output_path}/mask.jpg')
111
 
 
5
  import hydra
6
  from omegaconf import DictConfig
7
  from PIL import Image
 
 
 
 
 
8
 
9
  def load_pipeline(model_name: str, device, enable_compile: bool = True):
10
  pipeline = AutoPipelineForInpainting.from_pretrained(model_name, torch_dtype=torch.float16)
 
22
  self.mask_image = mask_image
23
  self.target_width = target_width
24
  self.target_height = target_height
 
25
 
26
  def run_inference(self, prompt: str, negative_prompt: str, num_inference_steps: int, strength: float, guidance_scale: float,num_images):
27
  output = self.pipeline(
 
37
  width=self.target_width
38
 
39
  ).images[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  return output
41
 
42
  @hydra.main(version_base=None, config_path="../configs", config_name="inpainting")
 
51
  extended_image = augmenter.extend_image(image)
52
  mask_image = augmenter.generate_mask_from_bbox(extended_image, cfg.segmentation_model, cfg.detection_model)
53
  mask_image = augmenter.invert_mask(mask_image)
 
54
 
55
  # Create AutoPaintingPipeline instance with cached pipeline
56
  painting_pipeline = AutoPaintingPipeline(
 
71
  )
72
 
73
  # Save output and mask images
 
 
 
 
 
 
 
 
 
74
  output.save(f'{cfg.output_path}/output.jpg')
75
  mask_image.save(f'{cfg.output_path}/mask.jpg')
76