OmPrakashSingh1704 committed on
Commit ae83f6f
Parent: 99c129a
app.py CHANGED
@@ -137,10 +137,12 @@ with gr.Blocks() as demo:
         img = gr.Image()
         prompt = gr.Textbox(label="Enter the text to get a good start")
         btn = gr.Button()
-        size = gr.Slider(label="Size", minimum=256, maximum=MAX_IMAGE_SIZE, step=8, value=1024)
-        num_inference_steps = gr.Slider(label="num_inference_steps", minimum=1, maximum=100,step=1, value=20)
+        with gr.Accordion("Advanced options", open=False):
+            size = gr.Slider(label="Size", minimum=256, maximum=MAX_IMAGE_SIZE, step=8, value=1024)
+            num_inference_steps = gr.Slider(label="num_inference_steps", minimum=1, maximum=100,step=1, value=20)
+            guidance_scale=gr.Slider(label="guidance_scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
         out_img = gr.Image()
-        btn.click(Banner.Image2Image_2, [prompt, img,size,num_inference_steps], out_img)
+        btn.click(Banner.Image2Image_2, [prompt, img,size,num_inference_steps,guidance_scale], out_img)
 
 
     with gr.Tab("Video"):
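This change folds the Image2Image tab's tuning sliders into a collapsed gr.Accordion and adds a guidance_scale slider, wired into btn.click as a fifth input. Below is a minimal, self-contained sketch of that pattern; the echo handler and the MAX_IMAGE_SIZE constant are stand-ins, not the app's real code.

import gradio as gr

MAX_IMAGE_SIZE = 2048  # stand-in; the real constant is defined elsewhere in app.py

def echo(prompt, image, size, steps, guidance):
    # Stand-in for Banner.Image2Image_2: log the settings and echo the image back.
    print(f"size={size}, steps={steps}, guidance={guidance}")
    return image

with gr.Blocks() as demo:
    img = gr.Image()
    prompt = gr.Textbox(label="Enter the text to get a good start")
    btn = gr.Button()
    with gr.Accordion("Advanced options", open=False):
        size = gr.Slider(label="Size", minimum=256, maximum=MAX_IMAGE_SIZE, step=8, value=1024)
        steps = gr.Slider(label="num_inference_steps", minimum=1, maximum=100, step=1, value=20)
        guidance = gr.Slider(label="guidance_scale", minimum=0.1, maximum=30.0, step=0.1, value=9.0)
    out_img = gr.Image()
    btn.click(echo, [prompt, img, size, steps, guidance], out_img)

demo.launch()

The accordion is purely a layout container: components declared inside it are ordinary inputs, so passing them to btn.click needs no other change.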
options/Banner.py CHANGED
@@ -21,5 +21,5 @@ def Image2Image(
     progress=gr.Progress(track_tqdm=True)
 ):return I2I(input_image_editor,input_text,seed_slicer,randomize_seed_checkbox,strength_slider,num_inference_steps_slider)
 
-def Image2Image_2(prompt,image,size,num_inference_steps=30):
+def Image2Image_2(prompt,image,size,num_inference_steps,guidance_scale):
     return I2I_2(image, prompt,size,num_inference_steps)
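Note that the wrapper now accepts guidance_scale but its body is unchanged, so the new value is never forwarded, while I2I_2 (next file) now requires it as a fifth positional argument; as committed, this call raises TypeError: I2I_2() missing 1 required positional argument. A sketch of the forwarding fix, which is an assumed correction rather than part of this commit (the import path is also assumed):

# Assumed fix, not in this commit: forward guidance_scale through to I2I_2.
from options.Banner_Model.Image2Image_2 import I2I_2

def Image2Image_2(prompt, image, size, num_inference_steps, guidance_scale):
    return I2I_2(image, prompt, size, num_inference_steps, guidance_scale)

The commit also drops the num_inference_steps=30 default, which is consistent with the slider always supplying a value.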
options/Banner_Model/Image2Image_2.py CHANGED
@@ -8,7 +8,7 @@ device= "cuda" if torch.cuda.is_available() else "cpu"
 print("Using device for I2I_2:", device)
 
 @spaces.GPU(duration=100)
-def I2I_2(image, prompt,size,num_inference_steps):
+def I2I_2(image, prompt,size,num_inference_steps,guidance_scale):
     processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
 
     checkpoint = "ControlNet-1-1-preview/control_v11p_sd15_lineart"
@@ -23,5 +23,5 @@ def I2I_2(image, prompt,size,num_inference_steps):
     image.resize((size,size))
     image=processor(image)
     generator = torch.Generator(device=device).manual_seed(0)
-    image = pipe(prompt+"best quality, extremely detailed", num_inference_steps=num_inference_steps, generator=generator, image=image,negative_prompt="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality").images[0]
+    image = pipe(prompt+"best quality, extremely detailed", num_inference_steps=num_inference_steps, generator=generator, image=image,negative_prompt="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",guidance_scale=guidance_scale).images[0]
     return image
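I2I_2 threads the new guidance_scale straight into the pipeline call. guidance_scale is the classifier-free guidance weight: higher values push the sample to follow the prompt more strictly at the cost of variety, and the UI default of 9.0 sits at the high end of the usual 7–9 range for SD 1.5. Below is a self-contained sketch of an equivalent call, assuming diffusers' StableDiffusionControlNetPipeline and the runwayml/stable-diffusion-v1-5 base checkpoint (the base model is not visible in this diff); only the ControlNet and annotator IDs come from the code above. One caveat in the committed code: image.resize((size,size)) returns a new PIL image, so calling it without assigning the result is a no-op; the sketch assigns it.

import torch
from PIL import Image
from controlnet_aux import LineartDetector
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
controlnet = ControlNetModel.from_pretrained(
    "ControlNet-1-1-preview/control_v11p_sd15_lineart",
    torch_dtype=torch.float16,  # fp16 assumes a GPU; drop for CPU runs
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed base model, not shown in the diff
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to(device)

source = Image.open("input.png")       # any RGB input image
source = source.resize((1024, 1024))   # assign the result; resize() is not in-place
control = processor(source)            # lineart control image

image = pipe(
    # Unlike the committed code, a separator is added before the quality tags.
    "a product banner, best quality, extremely detailed",
    image=control,
    num_inference_steps=20,
    guidance_scale=9.0,                # the knob this commit exposes in the UI
    negative_prompt="lowres, bad anatomy, worst quality, low quality",
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]
image.save("banner.png")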
options/Banner_Model/__pycache__/Image2Image_2.cpython-310.pyc CHANGED
Binary files a/options/Banner_Model/__pycache__/Image2Image_2.cpython-310.pyc and b/options/Banner_Model/__pycache__/Image2Image_2.cpython-310.pyc differ