Commit: ignore temp files
- Dockerfile  +2 -2
- main.py     +9 -7
Dockerfile
CHANGED
@@ -14,9 +14,9 @@ RUN apt-get update && apt-get install -y \
 
 # Copy the current directory contents into the container at /app
 COPY . /app
-RUN chown daemon:daemon -R /app/*
+# RUN chown daemon:daemon -R /app/*
 # Install any needed packages specified in requirements.txt
-RUN pip install --no-cache-dir -r requirements.txt
+# RUN pip install --no-cache-dir -r requirements.txt
 
 # Make port 7680 available to the world outside this container
 EXPOSE 7860
main.py
CHANGED
@@ -5,7 +5,7 @@ import aiohttp
 from fastapi import FastAPI, File, UploadFile, HTTPException
 from fastapi.responses import JSONResponse
 
-import os
+# import os
 # from os import path
 # cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
 
@@ -21,7 +21,9 @@ import os
 # os.environ['TORCH_HOME'] = PATH
 # os.environ['HF_HUB_CACHE'] = '/home/ahmadzen/.cache/huggingface'
 
-from transformers import AutoImageProcessor, ViTForImageClassification
+# from transformers import AutoImageProcessor, ViTForImageClassification
+from transformers import pipeline
+from transformers.pipelines import PipelineException
 from PIL import Image
 from cachetools import Cache
 import torch
@@ -41,9 +43,9 @@ logging.basicConfig(
 cache = Cache(maxsize=1000)
 
 # Load the model using the transformers pipeline
-
-image_processor = AutoImageProcessor.from_pretrained("Wvolf/ViT_Deepfake_Detection")
-model = ViTForImageClassification.from_pretrained("Wvolf/ViT_Deepfake_Detection")
+model = pipeline("image-classification", model="Wvolf/ViT_Deepfake_Detection")
+# image_processor = AutoImageProcessor.from_pretrained("Wvolf/ViT_Deepfake_Detection")
+# model = ViTForImageClassification.from_pretrained("Wvolf/ViT_Deepfake_Detection")
 
 # Detect the device used by TensorFlow
 # DEVICE = "GPU" if tf.config.list_physical_devices("GPU") else "CPU"
@@ -153,7 +155,7 @@ async def classify_image(file: UploadFile = File(None)):
 
         return FileImageDetectionResponse(**response_data)
 
-    except
+    except PipelineException as e:
         logging.error("Error processing image: %s", str(e))
         raise HTTPException(
             status_code=500, detail=f"Error processing image: {str(e)}"
@@ -233,7 +235,7 @@ async def classify_images(request: ImageUrlsRequest):
 
             response_data.append(detection_result)
 
-        except
+        except PipelineException as e:
             logging.error("Error processing image from %s: %s", image_url, str(e))
             raise HTTPException(
                 status_code=500,