Yarflam committed on
Commit
62b9889
·
1 Parent(s): a8eef7d

Fix Gradio components

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. ModelLoader.py +9 -6
  3. app.py +8 -7
.gitignore CHANGED
@@ -1 +1,2 @@
1
  __pycache__
 
 
1
  __pycache__
2
+ .gradio
ModelLoader.py CHANGED
@@ -58,14 +58,17 @@ class ModelLoader:
58
  def load(self) -> None:
59
  self.model = create_model(self.opt)
60
  self.model.load_networks('latest')
61
- def inference(self, src=''):
62
  if self.model == None: self.load()
63
- if not os.path.isfile(src):
64
- raise Exception('The image %s is not found!' % src)
65
  # Loading
66
- print('Loading the image %s' % src)
67
- source = Image.open(src).convert('RGB')
68
- img = self.transform(source).unsqueeze(0)
 
 
 
 
 
69
  print(img.shape)
70
  # Inference
71
  self.model.set_input({
 
58
  # Build the model from the stored options and restore the 'latest'
  # checkpoint weights via the project's create_model / load_networks API.
  def load(self) -> None:
59
  self.model = create_model(self.opt)
60
  self.model.load_networks('latest')
61
+ def inference(self, src='', image_pil=None):
62
  if self.model == None: self.load()
 
 
63
  # Loading
64
+ if isinstance(image_pil, Image.Image):
65
+ img = self.transform(image_pil.convert('RGB')).unsqueeze(0)
66
+ else:
67
+ if not os.path.isfile(src):
68
+ raise Exception('The image %s is not found!' % src)
69
+ print('Loading the image %s' % src)
70
+ source = Image.open(src).convert('RGB')
71
+ img = self.transform(source).unsqueeze(0)
72
  print(img.shape)
73
  # Inference
74
  self.model.set_input({
app.py CHANGED
@@ -2,23 +2,24 @@ import gradio as gr
2
  from ModelLoader import ModelLoader
3
  import os
4
 
5
- max_img_wh = 1024 # Set the maximum image size in pixels
6
 
7
  # Pre-fix handler: Gradio supplies a PIL image here, but it was forwarded
  # as a filesystem path (`src=image`) — ModelLoader.inference's
  # os.path.isfile check would reject it. The new revision of this file
  # passes the image via the `image_pil=` keyword instead.
  def inference(image, use_gpu):
8
  gpu_ids = '0' if use_gpu else ''
9
  model = ModelLoader(gpu_ids=gpu_ids, max_img_wh=max_img_wh)
10
  model.load()
11
- output_img = model.inference(src=image)
12
  return output_img
13
 
14
  # NOTE(review): gr.inputs / gr.outputs are the legacy Gradio component
  # namespaces; this commit replaces them with gr.components in the new hunk.
  demo = gr.Interface(
15
  fn=inference,
16
  inputs=[
17
- gr.inputs.Image(type="pil", label="Input Image"),
18
- gr.inputs.Checkbox(label="Use GPU", value=True) # Precheck the GPU checkbox
19
  ],
20
- outputs=gr.outputs.Image(type="pil", label="Output Image"),
21
- title="Model Demo",
22
- description="Upload an image to see the model output."

23
  )
24
  demo.launch()
 
2
  from ModelLoader import ModelLoader
3
  import os
4
 
5
+ max_img_wh = 4096 # Set the maximum image size in pixels
6
 
7
def inference(image, use_gpu):
    """Gradio handler: run the model on a PIL image and return its output image.

    image   -- PIL image delivered by the Gradio Image component
    use_gpu -- when truthy, run on GPU device '0'; otherwise on CPU

    NOTE(review): a fresh ModelLoader is constructed (and its weights
    reloaded) on every call — confirm this is intentional for the demo.
    """
    device = '0' if use_gpu else ''
    loader = ModelLoader(gpu_ids=device, max_img_wh=max_img_wh)
    loader.load()
    return loader.inference(image_pil=image)
13
 
14
  # UI wiring: PIL image in/out (JPEG-encoded output), prechecked GPU
  # checkbox, flagging disabled via flagging_mode='never'.
  demo = gr.Interface(
15
  fn=inference,
16
  inputs=[
17
+ gr.components.Image(type='pil', label='Input Image'),
18
+ gr.components.Checkbox(label='Use GPU', value=True) # Precheck the GPU checkbox
19
  ],
20
+ outputs=gr.components.Image(type='pil', label='Output Image', format='jpeg'),
21
+ title='UnderWater Photo - published by Yarflam',
22
+ description='Upload an image to see the model output.',
23
+ flagging_mode='never'
24
  )
25
  demo.launch()