TedYeh committed
Commit efb05de • 1 Parent(s): f680b9e

update app

Files changed:
- app.py +1 -1
- predictor.py +2 -2
app.py
CHANGED
@@ -13,7 +13,7 @@ with gr.Blocks() as demo:
             """
         )
         image = gr.Image(type="pil")
-        with gr.
+        with gr.Column(scale=1, min_width=600):
             # set up the output components
             heights = gr.Textbox(label="Heignt")
             bust = gr.Textbox(label="Bust")
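For context, the hunk only shows the opening line of the modified container. Below is a minimal sketch of how a gr.Column(scale=1, min_width=600) block of this kind typically sits inside a gr.Blocks app; the surrounding gr.Row, the predict() stub, and the button wiring are illustrative assumptions, not code from app.py.

import gradio as gr

def predict(img):
    # placeholder callback for illustration; the real app calls the model in predictor.py
    return "170 cm", "90 cm"

with gr.Blocks() as demo:
    with gr.Row():  # assumed wrapper; not visible in the hunk
        image = gr.Image(type="pil")
        with gr.Column(scale=1, min_width=600):
            # output components, mirroring the diff
            heights = gr.Textbox(label="Height")
            bust = gr.Textbox(label="Bust")
            btn = gr.Button("Predict")
    # wire the button to the callback (assumed; not shown in the hunk)
    btn.click(fn=predict, inputs=image, outputs=[heights, bust])

demo.launch()

In Gradio, scale=1 lets the column share width proportionally with its siblings inside the row, while min_width=600 keeps the output textboxes from collapsing on narrow screens.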
predictor.py
CHANGED
@@ -198,10 +198,10 @@ def evaluation(model, epoch, device, dataloaders):
     print(preds)

 def inference(inp_img, classes = ['big', 'small'], epoch = 6):
-    device = torch.device("
+    device = torch.device("cuda")
     translator= Translator(to_lang="zh-TW")

-    model =
+    model = CUPredictor().to(device)
     model.load_state_dict(torch.load(f'models/model_{epoch}.pt'))
     # load image-to-text model
     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
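The new lines pin inference to CUDA and load the checkpoint without a map_location, which will fail on a CPU-only host (for example a free Spaces runtime). Below is a hedged sketch of a more defensive variant of this setup inside predictor.py; CUPredictor and the models/model_{epoch}.pt path come from the diff, while the load_predictor wrapper, the availability check, and map_location are assumptions added for illustration.

import torch

def load_predictor(epoch=6):
    # fall back to CPU when no GPU is available (assumption; the commit hard-codes "cuda")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # CUPredictor is the classifier class already in scope in predictor.py
    model = CUPredictor().to(device)

    # map_location lets a GPU-saved checkpoint deserialize on a CPU-only host
    state = torch.load(f"models/model_{epoch}.pt", map_location=device)
    model.load_state_dict(state)
    model.eval()  # inference mode
    return model, device

On GPU hosts this behaves like the committed code; on CPU-only hosts it avoids the errors that .to(device) with an unavailable CUDA device and torch.load of GPU-saved tensors would otherwise raise.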