init
README.md
CHANGED
```diff
@@ -1,13 +1,16 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Vanilla Js Portrait Matting
+emoji: 🐠
+colorFrom: indigo
+colorTo: blue
 sdk: static
 pinned: false
-models:
-  - Xenova/detr-resnet-50
-license: apache-2.0
 ---
 
-
+# Vanilla Js Portrait Matting
+
+Based on [ZHKKKe/MODNet](https://github.com/ZHKKKe/MODNet) and [Transformers.js](https://huggingface.co/docs/transformers.js/index).
+
+Run: `npx live-server`
+
+Currently incompatible with Vite.
```
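For context, the matting flow this Space wires up (shown in full in `index.js` below) boils down to a few Transformers.js calls. The sketch below simply mirrors those calls; `'portrait.jpg'` is a placeholder for whatever URL or blob URL the page actually passes in.

```js
// Sketch of the calls used by index.js below (same model id and CDN build);
// 'portrait.jpg' is a placeholder for the image URL / blob URL the page supplies.
import { AutoModel, AutoProcessor, RawImage } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

const model = await AutoModel.from_pretrained('Xenova/modnet-onnx', { quantized: false });
const processor = await AutoProcessor.from_pretrained('Xenova/modnet-onnx');

const image = await RawImage.read('portrait.jpg');
const { pixel_values } = await processor(image);           // resize + normalize for MODNet
const { output } = await model({ input: pixel_values });   // single-channel alpha matte
const matte = await RawImage.fromTensor(output[0].mul(255).to('uint8'))
  .resize(image.width, image.height);                      // back to the original resolution
```

The README does not say why Vite fails; a plausible cause is the combination of the bare CDN import and top-level `await`, which a plain static server such as `live-server` serves without bundling.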
index.html
CHANGED
@@ -1,29 +1,26 @@
[Removed lines: the head/body markup of the previous object-detection demo.] New index.html:

```html
<!DOCTYPE html>
<html lang="en">

<head>
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.css" />
  <meta charset="UTF-8" />
  <link rel="stylesheet" href="style.css" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Portrait Matting demo</title>
</head>

<body>
  <main class="container">
    <label class="custom-file-select">
      <input id="file-select" type="file" accept="image/*" />
      select image
    </label>
    <p id="status"></p>
    <div class="wrap">
      <div id="image-container" class="left-column"></div>
      <div id="output-container" class="right-column"></div>
    </div>
  </main>
  <script src="./index.js" type="module"></script>
</body>

</html>
```
index.js
CHANGED
@@ -1,79 +1,119 @@
[Removed lines: the previous object-detection demo script (the `Xenova/detr-resnet-50` pipeline example with its `renderBox` bounding-box overlay).] New index.js:

```js
import { AutoProcessor, RawImage, AutoModel } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

const status = document.getElementById('status');
const fileSelect = document.getElementById('file-select');
const imageContainer = document.getElementById('image-container');
const outputContainer = document.getElementById('output-container');

status.textContent = 'Loading model...';

// Load model and processor
const model = await AutoModel.from_pretrained('Xenova/modnet-onnx', { quantized: false });
const processor = await AutoProcessor.from_pretrained('Xenova/modnet-onnx');

status.textContent = 'Ready';

// Show the default example image loaded from a remote URL
const url = 'https://images.pexels.com/photos/5965592/pexels-photo-5965592.jpeg?auto=compress&cs=tinysrgb&w=1024';
function useRemoteImage(url) {
  const image = document.createElement('img');
  image.src = url;
  imageContainer.appendChild(image);
  start(url);
}
useRemoteImage(url);

fileSelect.addEventListener('change', function (e) {
  const file = e.target.files[0];
  if (!file) {
    return;
  }

  const reader = new FileReader();

  // Set up a callback for when the file is loaded
  reader.onload = function (e2) {
    status.textContent = 'Image loaded';

    imageContainer.innerHTML = '';
    outputContainer.innerHTML = '';
    const image = document.createElement('img');
    image.src = e2.target.result;
    imageContainer.appendChild(image);
    start(image.src);
  };
  reader.readAsDataURL(file);
});

async function start(source) {
  status.textContent = 'Processing...';
  console.log('start process');

  const image = await RawImage.read(source);

  // Pre-process the image for MODNet
  const { pixel_values: input } = await processor(image);

  // Predict the alpha matte
  const { output } = await model({ input });
  console.log('image', RawImage);

  // Convert the output tensor to a RawImage and scale it back to the input size
  const matteImage = await RawImage.fromTensor(output[0].mul(255).to('uint8')).resize(image.width, image.height);

  console.log('matteImage', matteImage, output);
  status.textContent = 'Finished';

  // Render a RawImage onto a new canvas in the output container (kept for debugging)
  async function renderRawImage(image) {
    let rawCanvas = await image.toCanvas();
    const canvas = document.createElement('canvas');
    outputContainer.appendChild(canvas); // add the newly created canvas to the page
    canvas.width = image.width;
    canvas.height = image.height;

    const ctx = canvas.getContext('2d');

    ctx.drawImage(rawCanvas, 0, 0);
  }

  // renderRawImage(matteImage)

  // Apply the predicted matte as the alpha channel of the original image
  async function getForeground(rawImage, maskImage) {
    const rawCanvas = rawImage.toCanvas();
    const rawCtx = rawCanvas.getContext('2d');

    const maskCanvas = maskImage.toCanvas();
    const maskCtx = maskCanvas.getContext('2d');

    const rawImageData = rawCtx.getImageData(0, 0, rawCanvas.width, rawCanvas.height);
    const maskImageData = maskCtx.getImageData(0, 0, maskCanvas.width, maskCanvas.height);

    for (let i = 0; i < rawImageData.data.length; i += 4) {
      // Copy the matte's grayscale value (R, G and B are identical, so take R)
      // into the alpha channel (the 4th value of each pixel) of the original image
      rawImageData.data[i + 3] = maskImageData.data[i];
    }

    rawCtx.putImageData(rawImageData, 0, 0);
    return rawCanvas;
  }

  let foregroundCanvas = await getForeground(image, matteImage);

  console.log('debug', foregroundCanvas);

  // Convert the composited canvas to a blob and show it as an image
  foregroundCanvas.convertToBlob()
    .then(function (blob) {
      // Create an image element
      let img = new Image();

      // Create a blob URL and set it as the image src
      img.src = URL.createObjectURL(blob);

      // Append the image to the output container
      outputContainer.appendChild(img);
    })
    .catch(function (error) {
      // Catch and handle any error during blob creation
      console.error("Blob creation error: ", error);
    });
}
```
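`getForeground()` returns the composited canvas rather than attaching it to the DOM itself, so it is easy to reuse. As one possible extension (not part of this Space), the same canvas could be offered as a PNG download; the sketch assumes, as the existing `convertToBlob()` call already implies, that `toCanvas()` yields an OffscreenCanvas, and the `saveForeground` name is made up here.

```js
// Hypothetical helper, not in the Space: download the cut-out as a PNG.
// Assumes `foregroundCanvas` is the OffscreenCanvas returned by getForeground().
async function saveForeground(foregroundCanvas, filename = 'foreground.png') {
  const blob = await foregroundCanvas.convertToBlob({ type: 'image/png' });
  const link = document.createElement('a');
  link.href = URL.createObjectURL(blob);
  link.download = filename;      // suggested file name for the download
  link.click();                  // trigger the download
  URL.revokeObjectURL(link.href);
}
```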
style.css
CHANGED
@@ -1,76 +1,77 @@
[Removed lines: the global reset and layout rules of the previous object-detection demo.] New style.css:

```css
html,
body {
  font-family: Arial, Helvetica, sans-serif;
}

.container {
  margin: 40px auto;
  width: 100%;
  display: flex;
  flex-direction: column;
  align-items: center;
}

.wrap {
  display: flex;
  flex-direction: row;
  flex-wrap: wrap;
}

.left-column,
.right-column {
  flex: 1;
  padding: 10px;
  box-sizing: border-box;
  max-height: 500px;
}

@media (max-width: 508px) {
  .left-column,
  .right-column {
    flex: 100%;
  }
}

.custom-file-select {
  display: flex;
  align-items: center;
  cursor: pointer;
  gap: 10px;
  border: 2px solid black;
  padding: 8px 16px;
  border-radius: 6px;
}

#file-select {
  display: none;
}

#out-container {
  width: 400px;
  height: 500px;
}

img {
  width: 100%;
  height: 100%;
  object-fit: contain;
  border: 1px dashed rgba(128, 128, 128, 0.5);
}

.bounding-box {
  position: absolute;
  box-sizing: border-box;
  border-width: 2px;
  border-style: solid;
}

.bounding-box-label {
  color: white;
  position: absolute;
  font-size: 12px;
  margin-top: -16px;
  margin-left: -2px;
  padding: 1px;
}
```