import io
import os
import sys
from typing import List

import gradio as gr
import requests
import torch
import uvicorn
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import RedirectResponse
from pdf2image import convert_from_path
from PIL import Image
from torch.utils.data import DataLoader
from transformers import AutoProcessor

# Make the vendored colpali-main package importable before importing from it
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), './colpali-main')))

from colpali_engine.models.paligemma_colbert_architecture import ColPali
from colpali_engine.trainer.retrieval_evaluator import CustomEvaluator
from colpali_engine.utils.colpali_processing_utils import (
    process_images,
    process_queries,
)

app = FastAPI()

# Load model: PaliGemma backbone on CPU in bfloat16, then attach the ColPali adapter
model_name = "vidore/colpali"
token = os.environ.get("HF_TOKEN")
model = ColPali.from_pretrained(
    "google/paligemma-3b-mix-448", torch_dtype=torch.bfloat16, device_map="cpu", token=token
).eval()
model.load_adapter(model_name)
processor = AutoProcessor.from_pretrained(model_name, token=token)

# Move the model to GPU if one is available
device = "cuda:0" if torch.cuda.is_available() else "cpu"
if device != model.device:
    model.to(device)

# Blank placeholder image expected by process_queries
mock_image = Image.new("RGB", (448, 448), (255, 255, 255))

# In-memory storage for page embeddings and page images
ds = []
images = []
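
# --- Illustrative sketch (not part of the original file): one plausible way the
# --- in-memory store above gets filled. The function name `index_pdf` and its
# --- signature are assumptions for illustration; process_images is used here the
# --- way the colpali_engine helper is typically wired into a DataLoader collate_fn.
def index_pdf(pdf_path: str) -> int:
    """Convert a PDF into page images and store one multi-vector embedding per page."""
    pages = convert_from_path(pdf_path)  # pdf2image: one PIL.Image per page
    dataloader = DataLoader(
        pages,
        batch_size=4,
        shuffle=False,
        collate_fn=lambda batch: process_images(processor, batch),
    )
    for batch_doc in dataloader:
        with torch.no_grad():
            batch_doc = {k: v.to(device) for k, v in batch_doc.items()}
            embeddings_doc = model(**batch_doc)
        # Keep one (num_tokens, dim) embedding tensor per page, on CPU
        ds.extend(list(torch.unbind(embeddings_doc.to("cpu"))))
    images.extend(pages)
    return len(pages)
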
# Redirect the root to /docs
@app.get("/")
def read_root():
    return RedirectResponse(url="/docs")
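
# --- Illustrative sketch (not part of the original file): how a text query is
# --- typically embedded with process_queries (which takes the mock_image
# --- placeholder) and scored against the stored pages. The function name `search`
# --- and the CustomEvaluator usage are assumptions based on common ColPali demos.
def search(query: str) -> Image.Image:
    """Return the stored page image that best matches the query."""
    batch_query = process_queries(processor, [query], mock_image)
    with torch.no_grad():
        batch_query = {k: v.to(device) for k, v in batch_query.items()}
        embeddings_query = model(**batch_query)
    qs = list(torch.unbind(embeddings_query.to("cpu")))
    # Late-interaction (ColBERT-style MaxSim) scoring of the query against every page
    evaluator = CustomEvaluator(is_multi_vector=True)
    scores = evaluator.evaluate(qs, ds)
    best_page = int(scores.argmax(axis=1).item())
    return images[best_page]
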
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)