import { env, AutoProcessor, AutoModel, RawImage } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2';

// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;

// Reference the elements that we will need
const status = document.getElementById('status');
const fileUpload = document.getElementById('upload');
const imageContainer = document.getElementById('container');
const example = document.getElementById('example');

const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
const THRESHOLD = 0.2; // Minimum confidence score required to render a detection

// Load the object detection model and its processor
status.textContent = 'Loading model...';
const model_id = 'onnx-community/yolov10s';
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await AutoModel.from_pretrained(model_id, { quantized: true }); // Quantized ONNX weights for a smaller download
status.textContent = 'Ready';

example.addEventListener('click', (e) => {
    e.preventDefault();
    detect(EXAMPLE_URL);
});

fileUpload.addEventListener('change', function (e) {
    const file = e.target.files[0];
    if (!file) {
        return;
    }

    const reader = new FileReader();

    // Set up a callback when the file is loaded
    reader.onload = e2 => detect(e2.target.result);

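    // Read the file as a data URL; this fires the onload callback above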
    reader.readAsDataURL(file);
});


// Detect objects in the image
async function detect(url) {
    // Update UI
    imageContainer.innerHTML = '';

    // Read image
    const image = await RawImage.fromURL(url);

    // Scale the container to fit within 640x640 while preserving the image's aspect ratio
    const ar = image.width / image.height;
    const [cw, ch] = (ar > 1) ? [640, 640 / ar] : [640 * ar, 640];
    imageContainer.style.width = `${cw}px`;
    imageContainer.style.height = `${ch}px`;
    imageContainer.style.backgroundImage = `url(${url})`;

    status.textContent = 'Analysing...';

    // Preprocess image
    const inputs = await processor(image);

    // Predict bounding boxes
    const { output0 } = await model({ images: inputs.pixel_values });
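    // output0 is [batch, num_detections, 6]; each row is [xmin, ymin, xmax, ymax, score, class_id]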

    status.textContent = '';

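    // reshaped_input_sizes holds the [height, width] of the resized input; reverse to [width, height] for renderBox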
    const sizes = inputs.reshaped_input_sizes[0].reverse();
    output0.tolist()[0].forEach(x => renderBox(x, sizes));
}

// Render a bounding box and label on the image
function renderBox([xmin, ymin, xmax, ymax, score, id], [w, h]) {
    if (score < THRESHOLD) return; // Skip boxes with low confidence

    // Generate a random color for the box
    const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, '0');

    // Draw the box
    const boxElement = document.createElement('div');
    boxElement.className = 'bounding-box';
    Object.assign(boxElement.style, {
        borderColor: color,
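        // Convert pixel coordinates (in resized-input space) to percentages so boxes scale with the container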
        left: 100 * xmin / w + '%',
        top: 100 * ymin / h + '%',
        width: 100 * (xmax - xmin) / w + '%',
        height: 100 * (ymax - ymin) / h + '%',
    });

    // Draw label
    const labelElement = document.createElement('span');
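    // Non-breaking spaces keep multi-word labels on a single line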
    labelElement.textContent = `${model.config.id2label[id]} (${score.toFixed(2)})`.replaceAll(' ', '\u00a0');
    labelElement.className = 'bounding-box-label';
    labelElement.style.backgroundColor = color;

    boxElement.appendChild(labelElement);
    imageContainer.appendChild(boxElement);
}