Delete main.py
Browse files
main.py
DELETED
@@ -1,73 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
from fastapi import FastAPI, File, UploadFile
|
3 |
-
from fastapi.responses import FileResponse
|
4 |
-
from fastapi.staticfiles import StaticFiles
|
5 |
-
from fastapi import FastAPI, File, UploadFile, Form
|
6 |
-
from fastapi.responses import FileResponse
|
7 |
-
import torch
|
8 |
-
import shutil
|
9 |
-
import cv2
|
10 |
-
import numpy as np
|
11 |
-
import dlib
|
12 |
-
from torchvision import transforms
|
13 |
-
import torch.nn.functional as F
|
14 |
-
from vtoonify_model import Model # Importing the Model class from vtoonify_model.py
|
15 |
-
|
16 |
-
import gradio as gr
|
17 |
-
import pathlib
|
18 |
-
import sys
|
19 |
-
sys.path.insert(0, 'vtoonify')
|
20 |
-
|
21 |
-
from util import load_psp_standalone, get_video_crop_parameter, tensor2cv2
|
22 |
-
import torch
|
23 |
-
import torch.nn as nn
|
24 |
-
import numpy as np
|
25 |
-
import dlib
|
26 |
-
import cv2
|
27 |
-
from model.vtoonify import VToonify
|
28 |
-
from model.bisenet.model import BiSeNet
|
29 |
-
import torch.nn.functional as F
|
30 |
-
from torchvision import transforms
|
31 |
-
from model.encoder.align_all_parallel import align_face
|
32 |
-
import gc
|
33 |
-
import huggingface_hub
|
34 |
-
import os
|
35 |
-
|
36 |
-
# FastAPI application instance serving the toonify endpoints defined below.
app = FastAPI()
# Eagerly build the VToonify wrapper at import time, preferring CUDA when present.
# NOTE(review): the @app.on_event("startup") hook below constructs another
# Model instance, so the heavy network is effectively loaded twice — confirm
# this double initialisation is intended.
model = Model(device='cuda' if torch.cuda.is_available() else 'cpu')
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
@app.on_event("startup")
|
42 |
-
async def load_model():
|
43 |
-
global model
|
44 |
-
model = Model(device='cuda' if torch.cuda.is_available() else 'cpu')
|
45 |
-
|
46 |
-
from fastapi.responses import StreamingResponse
|
47 |
-
from io import BytesIO
|
48 |
-
|
49 |
-
@app.post("/upload/")
|
50 |
-
async def process_image(file: UploadFile = File(...), top: int = Form(...), bottom: int = Form(...), left: int = Form(...), right: int = Form(...)):
|
51 |
-
if model is None:
|
52 |
-
return {"error": "Model not loaded."}
|
53 |
-
|
54 |
-
# Save the uploaded image locally
|
55 |
-
with open("uploaded_image.jpg", "wb") as buffer:
|
56 |
-
shutil.copyfileobj(file.file, buffer)
|
57 |
-
|
58 |
-
# Process the uploaded image
|
59 |
-
aligned_face, instyle, message = model.detect_and_align_image("uploaded_image.jpg", top, bottom, left, right)
|
60 |
-
processed_image, message = model.image_toonify(aligned_face, instyle, model.exstyle, style_degree=0.5, style_type='cartoon1')
|
61 |
-
|
62 |
-
# Convert processed image to bytes
|
63 |
-
image_bytes = cv2.imencode('.jpg', processed_image)[1].tobytes()
|
64 |
-
|
65 |
-
# Return the processed image as a streaming response
|
66 |
-
return StreamingResponse(BytesIO(image_bytes), media_type="image/jpeg")
|
67 |
-
|
68 |
-
|
69 |
-
app.mount("/", StaticFiles(directory="AB", html=True), name="static")
|
70 |
-
|
71 |
-
@app.get("/")
|
72 |
-
def index() -> FileResponse:
|
73 |
-
return FileResponse(path="/app/AB/index.html", media_type="text/html")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|