Spaces:
Paused
Paused
Fix Gradio components
Browse files- .gitignore +1 -0
- ModelLoader.py +9 -6
- app.py +8 -7
.gitignore
CHANGED
@@ -1 +1,2 @@
|
|
1 |
__pycache__
|
|
|
|
1 |
__pycache__
|
2 |
+
.gradio
|
ModelLoader.py
CHANGED
@@ -58,14 +58,17 @@ class ModelLoader:
|
|
58 |
def load(self) -> None:
|
59 |
self.model = create_model(self.opt)
|
60 |
self.model.load_networks('latest')
|
61 |
-
def inference(self, src=''):
|
62 |
if self.model == None: self.load()
|
63 |
-
if not os.path.isfile(src):
|
64 |
-
raise Exception('The image %s is not found!' % src)
|
65 |
# Loading
|
66 |
-
print('Loading the image %s' % src)
|
67 |
-
source = Image.open(src).convert('RGB')
|
68 |
-
img = self.transform(source).unsqueeze(0)
|
|
|
|
|
|
|
|
|
|
|
69 |
print(img.shape)
|
70 |
# Inference
|
71 |
self.model.set_input({
|
|
|
58 |
def load(self) -> None:
|
59 |
self.model = create_model(self.opt)
|
60 |
self.model.load_networks('latest')
|
61 |
+
def inference(self, src='', image_pil=None):
|
62 |
if self.model == None: self.load()
|
|
|
|
|
63 |
# Loading
|
64 |
+
if isinstance(image_pil, Image.Image):
|
65 |
+
img = self.transform(image_pil.convert('RGB')).unsqueeze(0)
|
66 |
+
else:
|
67 |
+
if not os.path.isfile(src):
|
68 |
+
raise Exception('The image %s is not found!' % src)
|
69 |
+
print('Loading the image %s' % src)
|
70 |
+
source = Image.open(src).convert('RGB')
|
71 |
+
img = self.transform(source).unsqueeze(0)
|
72 |
print(img.shape)
|
73 |
# Inference
|
74 |
self.model.set_input({
|
app.py
CHANGED
@@ -2,23 +2,24 @@ import gradio as gr
|
|
2 |
from ModelLoader import ModelLoader
|
3 |
import os
|
4 |
|
5 |
-
max_img_wh =
|
6 |
|
7 |
def inference(image, use_gpu):
|
8 |
gpu_ids = '0' if use_gpu else ''
|
9 |
model = ModelLoader(gpu_ids=gpu_ids, max_img_wh=max_img_wh)
|
10 |
model.load()
|
11 |
-
output_img = model.inference(
|
12 |
return output_img
|
13 |
|
14 |
demo = gr.Interface(
|
15 |
fn=inference,
|
16 |
inputs=[
|
17 |
-
gr.
|
18 |
-
gr.
|
19 |
],
|
20 |
-
outputs=gr.
|
21 |
-
title=
|
22 |
-
description=
|
|
|
23 |
)
|
24 |
demo.launch()
|
|
|
2 |
from ModelLoader import ModelLoader
|
3 |
import os
|
4 |
|
5 |
+
max_img_wh = 4096 # Set the maximum image size in pixels
|
6 |
|
7 |
def inference(image, use_gpu):
|
8 |
gpu_ids = '0' if use_gpu else ''
|
9 |
model = ModelLoader(gpu_ids=gpu_ids, max_img_wh=max_img_wh)
|
10 |
model.load()
|
11 |
+
output_img = model.inference(image_pil=image)
|
12 |
return output_img
|
13 |
|
14 |
demo = gr.Interface(
|
15 |
fn=inference,
|
16 |
inputs=[
|
17 |
+
gr.components.Image(type='pil', label='Input Image'),
|
18 |
+
gr.components.Checkbox(label='Use GPU', value=True) # Precheck the GPU checkbox
|
19 |
],
|
20 |
+
outputs=gr.components.Image(type='pil', label='Output Image', format='jpeg'),
|
21 |
+
title='UnderWater Photo - published by Yarflam',
|
22 |
+
description='Upload an image to see the model output.',
|
23 |
+
flagging_mode='never'
|
24 |
)
|
25 |
demo.launch()
|