Commit • 2f64630
1 Parent(s): 28ce999
working to add support for Refiner step

- .env (+4 -1)
- src/app/engine/render.ts (+68 -14)
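
In short: the single RENDERING_HF_INFERENCE_API_MODEL setting is split into separate base and refiner model variables, and newRender() gains a second, best-effort pass that sends the base model's output through the SDXL refiner whenever the INFERENCE_API rendering engine is used.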
.env
CHANGED
@@ -45,7 +45,10 @@ RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca
 RENDERING_HF_INFERENCE_ENDPOINT_URL="https://XXXXXXXXXX.endpoints.huggingface.cloud"
 
 # If you decided to use a Hugging Face Inference API model for the RENDERING engine
-RENDERING_HF_INFERENCE_API_MODEL="stabilityai/stable-diffusion-xl-base-1.0"
+RENDERING_HF_INFERENCE_API_BASE_MODEL="stabilityai/stable-diffusion-xl-base-1.0"
+
+# If you decided to use a Hugging Face Inference API model for the RENDERING engine
+RENDERING_HF_INFERENCE_API_REFINER_MODEL="stabilityai/stable-diffusion-xl-refiner-1.0"
 
 # An experimental RENDERING engine (sorry it is not very documented yet, so you can use one of the other engines)
 RENDERING_VIDEOCHAIN_API_URL="http://localhost:7860"
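
Note: the two defaults above are the Stable Diffusion XL base/refiner pair, where the base checkpoint generates the image and the refiner checkpoint re-processes it to improve fine detail. As a minimal sketch (illustrative only, these variable names are hypothetical), the two settings resolve to public Inference API endpoints, mirroring what render.ts does below:

// Illustrative sketch, not part of this commit: map the settings to model URLs
const baseModel = process.env.RENDERING_HF_INFERENCE_API_BASE_MODEL || ""
const refinerModel = process.env.RENDERING_HF_INFERENCE_API_REFINER_MODEL || ""

const baseApiUrl = `https://api-inference.huggingface.co/models/${baseModel}`
const refinerApiUrl = `https://api-inference.huggingface.co/models/${refinerModel}`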
src/app/engine/render.ts
CHANGED
@@ -12,7 +12,8 @@ const renderingEngine = `${process.env.RENDERING_ENGINE || ""}` as RenderingEngine
 // TODO: we should split Hugging Face and Replicate backends into separate files
 const huggingFaceToken = `${process.env.AUTH_HF_API_TOKEN || ""}`
 const huggingFaceInferenceEndpointUrl = `${process.env.RENDERING_HF_INFERENCE_ENDPOINT_URL || ""}`
-const huggingFaceInferenceApiModel = `${process.env.RENDERING_HF_INFERENCE_API_MODEL || ""}`
+const huggingFaceInferenceApiBaseModel = `${process.env.RENDERING_HF_INFERENCE_API_BASE_MODEL || ""}`
+const huggingFaceInferenceApiRefinerModel = `${process.env.RENDERING_HF_INFERENCE_API_REFINER_MODEL || ""}`
 
 const replicateToken = `${process.env.AUTH_REPLICATE_API_TOKEN || ""}`
 const replicateModel = `${process.env.RENDERING_REPLICATE_API_MODEL || ""}`
@@ -102,13 +103,16 @@
   if (renderingEngine === "INFERENCE_ENDPOINT" && !huggingFaceInferenceEndpointUrl) {
     throw new Error(`you need to configure your RENDERING_HF_INFERENCE_ENDPOINT_URL in order to use the INFERENCE_ENDPOINT rendering engine`)
   }
-  if (renderingEngine === "INFERENCE_API" && !huggingFaceInferenceApiModel) {
-    throw new Error(`you need to configure your RENDERING_HF_INFERENCE_API_MODEL in order to use the INFERENCE_API rendering engine`)
+  if (renderingEngine === "INFERENCE_API" && !huggingFaceInferenceApiBaseModel) {
+    throw new Error(`you need to configure your RENDERING_HF_INFERENCE_API_BASE_MODEL in order to use the INFERENCE_API rendering engine`)
+  }
+  if (renderingEngine === "INFERENCE_API" && !huggingFaceInferenceApiRefinerModel) {
+    throw new Error(`you need to configure your RENDERING_HF_INFERENCE_API_REFINER_MODEL in order to use the INFERENCE_API rendering engine`)
   }
 
-  const url = renderingEngine === "INFERENCE_ENDPOINT"
+  const baseModelUrl = renderingEngine === "INFERENCE_ENDPOINT"
     ? huggingFaceInferenceEndpointUrl
-    : `https://api-inference.huggingface.co/models/${huggingFaceInferenceApiModel}`
+    : `https://api-inference.huggingface.co/models/${huggingFaceInferenceApiBaseModel}`
 
   /*
   console.log(`calling ${url} with params: `, {
@@ -119,20 +123,22 @@
   })
   */
 
-  const res = await fetch(url, {
+  const positivePrompt = [
+    "beautiful",
+    "intricate details",
+    prompt,
+    "award winning",
+    "high resolution"
+  ].join(", ")
+
+  const res = await fetch(baseModelUrl, {
     method: "POST",
     headers: {
       "Content-Type": "application/json",
       Authorization: `Bearer ${huggingFaceToken}`,
     },
     body: JSON.stringify({
-      inputs: [
-        "beautiful",
-        "intricate details",
-        prompt,
-        "award winning",
-        "high resolution"
-      ].join(", "),
+      inputs: positivePrompt,
       parameters: {
         num_inference_steps: 25,
         guidance_scale: 8,
@@ -159,8 +165,56 @@
 
   const contentType = res.headers.get('content-type')
 
-  const assetUrl = `data:${contentType};base64,${Buffer.from(blob).toString('base64')}`
+  let assetUrl = `data:${contentType};base64,${Buffer.from(blob).toString('base64')}`
+
+  // note: there is no "refiner" step yet for custom inference endpoint
+  // you probably don't need it anyway, as you probably want to deploy an all-in-one model instead for perf reasons
+  if (renderingEngine === "INFERENCE_API") {
+    try {
+      const refinerModelUrl = `https://api-inference.huggingface.co/models/${huggingFaceInferenceApiRefinerModel}`
+
+      const res = await fetch(refinerModelUrl, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+          Authorization: `Bearer ${huggingFaceToken}`,
+        },
+        body: JSON.stringify({
+          data: assetUrl,
+          parameters: {
+            prompt: positivePrompt,
+            num_inference_steps: 25,
+            guidance_scale: 8,
+            width,
+            height,
+          },
+          use_cache: false,
+        }),
+        cache: "no-store",
+        // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
+        // next: { revalidate: 1 }
+      })
+
 
+      // Recommendation: handle errors
+      if (res.status !== 200) {
+        const content = await res.text()
+        console.error(content)
+        // This will activate the closest `error.js` Error Boundary
+        throw new Error('Failed to fetch data')
+      }
+
+      const blob = await res.arrayBuffer()
+
+      const contentType = res.headers.get('content-type')
+
+      assetUrl = `data:${contentType};base64,${Buffer.from(blob).toString('base64')}`
+
+    } catch (err) {
+      console.log(`Refiner step failed, but this is not a blocker. Error details: ${err}`)
+    }
+  }
+
   return {
     renderId: uuidv4(),
     status: "completed",
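
To see the new control flow without the diff markers, here is a condensed sketch of what the INFERENCE_API path now does. This is a sketch under assumptions, not the actual implementation: it assumes Node 18+ (global fetch and Buffer), and callInferenceApi / renderWithRefiner are hypothetical names that do not exist in this repository.

// Condensed sketch of the commit's base-then-refine flow (hypothetical helpers).
async function callInferenceApi(model: string, payload: unknown, token: string): Promise<string> {
  const res = await fetch(`https://api-inference.huggingface.co/models/${model}`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify(payload),
    cache: "no-store",
  })
  if (res.status !== 200) {
    throw new Error(await res.text())
  }
  // Convert the returned image bytes into a data URI, as render.ts does
  const blob = await res.arrayBuffer()
  const contentType = res.headers.get("content-type")
  return `data:${contentType};base64,${Buffer.from(blob).toString("base64")}`
}

async function renderWithRefiner(prompt: string, token: string, base: string, refiner: string): Promise<string> {
  // Step 1: the base model turns the text prompt into an image
  let assetUrl = await callInferenceApi(base, {
    inputs: prompt,
    parameters: { num_inference_steps: 25, guidance_scale: 8 },
  }, token)

  // Step 2: the refiner re-processes the base image; failures are non-blocking,
  // so the base image is still returned if this call throws
  try {
    assetUrl = await callInferenceApi(refiner, {
      data: assetUrl,
      parameters: { prompt, num_inference_steps: 25, guidance_scale: 8 },
      use_cache: false,
    }, token)
  } catch (err) {
    console.log(`Refiner step failed, but this is not a blocker. Error details: ${err}`)
  }
  return assetUrl
}

The try/catch around the second call is the point of the commit: the refiner is treated as an optional enhancement, so if the refiner model is unavailable or errors out, the render still completes with the un-refined base image.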