Commit 81eb27e
1 Parent(s): 7967a54

fixes for the Inference API
Files changed:
- src/app/engine/presets.ts (+17 -12)
- src/app/engine/render.ts (+28 -22)
- src/app/interface/panel/index.tsx (+40 -5)
- src/app/main.tsx (+45 -29)
- src/app/queries/getStory.ts (+2 -2)
- src/app/queries/predictWithHuggingFace.ts (+5 -0)
src/app/engine/presets.ts
CHANGED

@@ -44,11 +44,12 @@ export const presets: Record<string, Preset> = {
     font: "actionman",
     llmPrompt: "japanese manga",
     imagePrompt: (prompt: string) => [
+      `grayscale`,
+      `intricate details`,
       `japanese manga about ${prompt}`,
       "single panel",
       "manga",
       "japanese",
-      "grayscale",
       "intricate",
       "detailed",
       // "drawing"
@@ -98,9 +99,9 @@ export const presets: Record<string, Preset> = {
     font: "actionman",
     llmPrompt: "Franco-Belgian comic (a \"bande dessinée\"), in the style of Franquin, Moebius etc",
     imagePrompt: (prompt: string) => [
-      `franco-belgian color comic about ${prompt}`,
       "bande dessinée",
       "franco-belgian comic",
+      `franco-belgian color comic about ${prompt}`,
       "comic album",
       // "color drawing"
     ],
@@ -123,9 +124,9 @@ export const presets: Record<string, Preset> = {
     font: "actionman",
     llmPrompt: "american comic",
     imagePrompt: (prompt: string) => [
+      "digital color comicbook style",
       `modern american comic about ${prompt}`,
       //"single panel",
-      "digital color comicbook style",
       // "2010s",
       // "digital print",
       // "color comicbook",
@@ -182,11 +183,11 @@ export const presets: Record<string, Preset> = {
     font: "actionman",
     llmPrompt: "american comic",
     imagePrompt: (prompt: string) => [
+      "1950",
+      "50s",
       `vintage american color comic about ${prompt}`,
       // "single panel",
       // "comicbook style",
-      "1950",
-      "50s",
       // "color comicbook",
       // "color drawing"
     ],
@@ -243,10 +244,10 @@ export const presets: Record<string, Preset> = {
     llmPrompt: "new pulp science fiction",
     imagePrompt: (prompt: string) => [
       `vintage color pulp comic panel`,
-      `${prompt}`,
       "40s",
       "1940",
       "vintage science fiction",
+      `${prompt}`,
       // "single panel",
       // "comic album"
     ],
@@ -271,13 +272,14 @@ export const presets: Record<string, Preset> = {
     llmPrompt: "comic books by Moebius",
     imagePrompt: (prompt: string) => [
       `color comic panel`,
-      `${prompt}`,
       "style of Moebius",
+      `${prompt}`,
       "by Moebius",
       "french comic panel",
       "franco-belgian style",
       "bande dessinée",
       "single panel",
+      "intricate"
       // "comic album"
     ],
     negativePrompt: () => [
@@ -300,10 +302,10 @@ export const presets: Record<string, Preset> = {
     llmPrompt: "writing Tintin comic books",
     imagePrompt: (prompt: string) => [
       `color comic panel`,
-      `${prompt}`,
       "style of Hergé",
-      "by Hergé",
       "tintin style",
+      `${prompt}`,
+      "by Hergé",
       "french comic panel",
       "franco-belgian style",
       // "color panel",
@@ -355,11 +357,11 @@ export const presets: Record<string, Preset> = {
     llmPrompt: "french style comic books set in ancient Rome and Gaul",
     imagePrompt: (prompt: string) => [
       `color comic panel`,
-      `about ${prompt}`,
       "romans",
       "gauls",
       "french comic panel",
       "franco-belgian style",
+      `about ${prompt}`,
       "bande dessinée",
       "single panel",
       // "comical",
@@ -412,6 +414,7 @@ export const presets: Record<string, Preset> = {
       `patchwork`,
       `style of Gustav Klimt`,
       `Gustav Klimt painting`,
+      `intricate details`,
       `${prompt}`,
     ],
     negativePrompt: () => [
@@ -433,9 +436,11 @@ export const presets: Record<string, Preset> = {
     imagePrompt: (prompt: string) => [
       `medieval illuminated manuscript`,
       `illuminated manuscript of`,
+      `medieval`,
+      `intricate details`,
       // `medieval color engraving`,
       `${prompt}`,
-      `medieval`
+      // `medieval`
     ],
     negativePrompt: () => [
       "manga",
@@ -506,9 +511,9 @@ export const presets: Record<string, Preset> = {
     llmPrompt: "ancient egyptian stories.",
     imagePrompt: (prompt: string) => [
       `ancient egyptian wall painting`,
+      `ancient egypt`,
       // `medieval color engraving`,
       `${prompt}`,
-      `ancient egypt`,
     ],
     negativePrompt: () => [
       "manga",
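The preset edits above only reorder and add plain string terms; their effect comes from how the array returned by imagePrompt() is flattened into a single SDXL prompt. Below is a minimal, self-contained TypeScript sketch of that behavior: the PresetSketch type is a simplification (not the repository's full Preset type), and the .join(", ") mirrors the call main.tsx makes in this same commit. Terms moved to the front of the array appear earlier in the final prompt, so they tend to carry more weight and are less likely to be cut off by the 77-token limit.

// Minimal sketch of how an imagePrompt array becomes the SDXL prompt string.
type PresetSketch = {
  llmPrompt: string
  imagePrompt: (prompt: string) => string[]
}

const japaneseManga: PresetSketch = {
  llmPrompt: "japanese manga",
  imagePrompt: (prompt: string) => [
    `grayscale`,
    `intricate details`,
    `japanese manga about ${prompt}`,
    "single panel",
    "manga",
    "japanese",
    "intricate",
    "detailed",
  ],
}

// mirrors the `preset.imagePrompt(limitedPrompt).join(", ")` call added in main.tsx
const panelPromptPrefix = japaneseManga.imagePrompt("a cat exploring a spaceship").join(", ")
console.log(panelPromptPrefix)
// "grayscale, intricate details, japanese manga about a cat exploring a spaceship, single panel, ..."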
src/app/engine/render.ts
CHANGED

@@ -26,12 +26,14 @@ export async function newRender({
   prompt,
   // negativePrompt,
   width,
-  height
+  height,
+  withCache
 }: {
   prompt: string
   // negativePrompt: string[]
   width: number
   height: number
+  withCache: boolean
 }) {
   if (!prompt) {
     const error = `cannot call the rendering API without a prompt, aborting..`
@@ -49,6 +51,8 @@ export async function newRender({
     segments: []
   }

+  const nbInferenceSteps = 30
+  const guidanceScale = 9

   try {
     if (renderingEngine === "REPLICATE") {
@@ -69,7 +73,7 @@ export async function newRender({
         input: {
           prompt: [
             "beautiful",
-            "intricate details",
+            // "intricate details",
             prompt,
             "award winning",
             "high resolution"
@@ -111,10 +115,9 @@ export async function newRender({
         ? huggingFaceInferenceEndpointUrl
         : `https://api-inference.huggingface.co/models/${huggingFaceInferenceApiBaseModel}`

-
       const positivePrompt = [
         "beautiful",
-        "intricate details",
+        // "intricate details",
         prompt,
         "award winning",
         "high resolution"
@@ -129,12 +132,14 @@ export async function newRender({
         body: JSON.stringify({
           inputs: positivePrompt,
           parameters: {
-            num_inference_steps:
-            guidance_scale:
+            num_inference_steps: nbInferenceSteps,
+            guidance_scale: guidanceScale,
             width,
             height,
           },
+
+          // this doesn't do what you think it does
+          use_cache: false, // withCache,
         }),
         cache: "no-store",
         // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
@@ -159,13 +164,12 @@ export async function newRender({
       // note: there is no "refiner" step yet for custom inference endpoint
       // you probably don't need it anyway, as you probably want to deploy an all-in-one model instead for perf reasons

-      // update: right now it is not possible to use it from the Inference API either:
-      // "Model type not found or pipeline not implemented"
-      /*
       if (renderingEngine === "INFERENCE_API") {
         try {
           const refinerModelUrl = `https://api-inference.huggingface.co/models/${huggingFaceInferenceApiRefinerModel}`

           const res = await fetch(refinerModelUrl, {
             method: "POST",
             headers: {
@@ -173,15 +177,17 @@ export async function newRender({
               Authorization: `Bearer ${huggingFaceToken}`,
             },
             body: JSON.stringify({
+              inputs: Buffer.from(blob).toString('base64'),
               parameters: {
                 prompt: positivePrompt,
-                num_inference_steps:
-                guidance_scale:
+                num_inference_steps: nbInferenceSteps,
+                guidance_scale: guidanceScale,
                 width,
                 height,
               },
+
+              // this doesn't do what you think it does
+              use_cache: false, // withCache,
             }),
             cache: "no-store",
             // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
@@ -191,22 +197,22 @@ export async function newRender({

           // Recommendation: handle errors
           if (res.status !== 200) {
-            const content = await res.
-            //
-            throw new Error('Failed to fetch data')
+            const content = await res.json()
+            // if (content.error.include("currently loading")) {
+            //   console.log("refiner isn't ready yet")
+            throw new Error(content?.error || 'Failed to fetch data')
           }

-          const
+          const refinedBlob = await res.arrayBuffer()

           const contentType = res.headers.get('content-type')

-          assetUrl = `data:${contentType};base64,${Buffer.from(
+          assetUrl = `data:${contentType};base64,${Buffer.from(refinedBlob).toString('base64')}`

         } catch (err) {
           console.log(`Refiner step failed, but this is not a blocker. Error details: ${err}`)
         }
       }

       return {
@@ -230,7 +236,7 @@ export async function newRender({
         prompt,
         // negativePrompt, unused for now
         nbFrames: 1,
-        nbSteps:
+        nbSteps: nbInferenceSteps, // 20 = fast, 30 = better, 50 = best
         actionnables: [], // ["text block"],
         segmentation: "disabled", // "firstframe", // one day we will remove this param, to make it automatic
         width,
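For readers who want to see the Inference API call in isolation, the sketch below reproduces the request shape this commit builds: num_inference_steps, guidance_scale, and the use_cache flag the author notes "doesn't do what you think it does". It is a standalone approximation, not the repository's newRender(): the model name and the HF_API_TOKEN variable are placeholders, and it assumes a Node 18+ runtime with global fetch and Buffer.

const HF_TOKEN = process.env.HF_API_TOKEN ?? ""
const baseModel = "stabilityai/stable-diffusion-xl-base-1.0" // placeholder model name

async function renderOnce(positivePrompt: string, width: number, height: number): Promise<string> {
  const res = await fetch(`https://api-inference.huggingface.co/models/${baseModel}`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${HF_TOKEN}`,
    },
    body: JSON.stringify({
      inputs: positivePrompt,
      parameters: {
        num_inference_steps: 30,
        guidance_scale: 9,
        width,
        height,
      },
      // sent as-is to mirror the commit, even though the author flags it as ineffective
      use_cache: false,
    }),
    cache: "no-store",
  })

  if (res.status !== 200) {
    const content = await res.json()
    throw new Error(content?.error || "Failed to fetch data")
  }

  // the endpoint replies with raw image bytes; turn them into a data URI like render.ts does
  const blob = await res.arrayBuffer()
  const contentType = res.headers.get("content-type")
  return `data:${contentType};base64,${Buffer.from(blob).toString("base64")}`
}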
src/app/interface/panel/index.tsx
CHANGED

@@ -63,7 +63,12 @@ export function Panel({
   const delay = enableRateLimiter ? (1000 + (500 * panel)) : 1000


-  const startImageGeneration = ({ prompt, width, height
+  const startImageGeneration = ({ prompt, width, height, revision }: {
+    prompt: string
+    width: number
+    height: number
+    revision: number
+  }) => {
     if (!prompt?.length) { return }

     // important: update the status, and clear the scene
@@ -75,12 +80,37 @@ export function Panel({
     setTimeout(() => {
       startTransition(async () => {

+        const withCache = revision === 0
+
+        // atrocious and very, very, very, very, very, very, very ugly hack for the Inference API
+        // as apparently "use_cache: false" doesn't work, or doesn't do what we want it to do
+        let cacheInvalidationHack = ""
+        const nbMaxRevisions = 6
+        for (let i = 0; i < revision && revision < nbMaxRevisions; i++) {
+          const j = Math.random()
+          cacheInvalidationHack += j < 0.3 ? "_" : j < 0.6 ? "," : "-"
+        }
+
         let newRendered: RenderedScene
         try {
+          newRendered = await newRender({
+            prompt: cacheInvalidationHack + " " + prompt,
+            width,
+            height,
+
+            // TODO: here we never reset the revision, so only the first user
+            // comic will be cached (we should fix that later)
+            withCache: revision === 0
+          })
         } catch (err) {
           // "Failed to load the panel! Don't worry, we are retrying..")
-          newRendered = await newRender({
+          newRendered = await newRender({
+            prompt: cacheInvalidationHack + " " + prompt,
+            width,
+            height,
+            withCache,
+          })
         }

         if (newRendered) {
@@ -133,7 +163,12 @@ export function Panel({
       } else if (newRendered.status === "error" ||
         (newRendered.status === "completed" && !newRendered.assetUrl?.length)) {
         try {
-          const newAttempt = await newRender({
+          const newAttempt = await newRender({
+            prompt,
+            width,
+            height,
+            withCache: false,
+          })
           setRendered(panelId, newAttempt)
         } catch (err) {
           console.error("yeah sorry, something is wrong.. aborting", err)
@@ -154,7 +189,7 @@ export function Panel({
   useEffect(() => {
     if (!prompt.length) { return }

-    startImageGeneration({ prompt, width, height })
+    startImageGeneration({ prompt, width, height, revision })

     clearTimeout(timeoutRef.current)

src/app/main.tsx
CHANGED

@@ -10,6 +10,7 @@ import { Zoom } from "./interface/zoom"
 import { getStory } from "./queries/getStory"
 import { BottomBar } from "./interface/bottom-bar"
 import { Page } from "./interface/page"
+import { LLMResponse } from "@/types"

 export default function Main() {
   const [_isPending, startTransition] = useTransition()
@@ -41,42 +42,57 @@ export default function Main()
       // I don't think we are going to need a rate limiter on the LLM part anymore
       const enableRateLimiter = false // `${process.env.NEXT_PUBLIC_ENABLE_RATE_LIMITER}` === "true"

-      const llmResponse = await getStory({ preset, prompt })
-      console.log("LLM responded:", llmResponse)
-
-      // we have to limit the size of the prompt, otherwise the rest of the style won't be followed
-
-      if (limitedPrompt.length !== prompt.length) {
-        console.log("Sorry folks, the prompt was cut to:", limitedPrompt)
-      }
+      const nbPanels = 4

+      let llmResponse: LLMResponse = []

+      try {
+        llmResponse = await getStory({ preset, prompt })
+        console.log("LLM responded:", llmResponse)

+      } catch (err) {
+        console.log("LLM step failed due to:", err)
+        console.log("we are now switching to a degraded mode, using 4 similar panels")
+
+        llmResponse = []
         for (let p = 0; p < nbPanels; p++) {
+          llmResponse.push({
+            panel: p,
+            instructions: `${prompt} ${".".repeat(p)}`,
+            caption: "(Sorry, LLM generation failed: using degraded mode)"
+          })
         }
-
-        setCaptions(newCaptions)
-        setPanels(newPanels)
-      } catch (err) {
         console.error(err)
-      } finally {
-        setTimeout(() => {
-          setGeneratingStory(false)
-          setWaitABitMore(false)
-        }, enableRateLimiter ? 12000 : 0)
       }
+
+      // we have to limit the size of the prompt, otherwise the rest of the style won't be followed
+
+      let limitedPrompt = prompt.slice(0, 77)
+      if (limitedPrompt.length !== prompt.length) {
+        console.log("Sorry folks, the prompt was cut to:", limitedPrompt)
+      }
+
+      const panelPromptPrefix = preset.imagePrompt(limitedPrompt).join(", ")
+
+      const newPanels: string[] = []
+      const newCaptions: string[] = []
+      setWaitABitMore(true)
+      console.log("Panel prompts for SDXL:")
+      for (let p = 0; p < nbPanels; p++) {
+        newCaptions.push(llmResponse[p]?.caption || "...")
+        const newPanel = [panelPromptPrefix, llmResponse[p]?.instructions || ""].map(chunk => chunk).join(", ")
+        newPanels.push(newPanel)
+        console.log(newPanel)
+      }
+
+      setCaptions(newCaptions)
+      setPanels(newPanels)
+
+      setTimeout(() => {
+        setGeneratingStory(false)
+        setWaitABitMore(false)
+      }, enableRateLimiter ? 12000 : 0)
+
     })
   }, [prompt, preset?.label]) // important: we need to react to preset changes too

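main.tsx now imports LLMResponse from "@/types", which is not shown in this diff; judging from how it is consumed here and from the JSON schema requested in getStory.ts, it is presumably an array of { panel, instructions, caption } objects. The sketch below spells out that assumption and isolates the new degraded-mode fallback as a plain function (the type name and buildDegradedResponse are mine, for illustration only).

// Assumed shape of LLMResponse, inferred from its usage in this commit.
type LLMResponseSketch = Array<{
  panel: number
  instructions: string
  caption: string
}>

// Degraded-mode fallback: if the LLM call fails, build 4 near-identical panels
// from the raw user prompt instead of giving up entirely.
function buildDegradedResponse(prompt: string, nbPanels = 4): LLMResponseSketch {
  const llmResponse: LLMResponseSketch = []
  for (let p = 0; p < nbPanels; p++) {
    llmResponse.push({
      panel: p,
      instructions: `${prompt} ${".".repeat(p)}`,
      caption: "(Sorry, LLM generation failed: using degraded mode)",
    })
  }
  return llmResponse
}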
src/app/queries/getStory.ts
CHANGED

@@ -25,9 +25,9 @@ export const getStory = async ({
       content: [
         `You are a comic book author specialized in ${preset.llmPrompt}`,
         `Please write detailed drawing instructions and a one-sentence short caption for the 4 panels of a new silent comic book page.`,
-        `Give your response as a JSON array like this: \`Array<{ panel: number; instructions: string; caption: string}>\`.`,
+        `Give your response as a VALID JSON array like this: \`Array<{ panel: number; instructions: string; caption: string}>\`.`,
         // `Give your response as Markdown bullet points.`,
-        `Be brief in your 4 instructions and captions, don't add your own comments. Be straight to the point, and never reply things like "Sure, I can.." etc.`
+        `Be brief in your 4 instructions and captions, don't add your own comments. Be straight to the point, and never reply things like "Sure, I can.." etc. Reply using valid JSON.`
       ].filter(item => item).join("\n")
     },
     {
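The prompt tweak only strengthens the formatting constraint: the model is now asked twice to reply with valid JSON matching Array<{ panel: number; instructions: string; caption: string }>. For illustration, here is what a compliant reply could look like and how it parses (the story content is made up, not from the repository):

const exampleReply = `[
  { "panel": 1, "instructions": "wide shot of a rainy city street at night", "caption": "The city never sleeps." },
  { "panel": 2, "instructions": "close-up of a detective under a streetlamp", "caption": "Someone is watching." },
  { "panel": 3, "instructions": "the detective finds a torn photograph", "caption": "A clue at last." },
  { "panel": 4, "instructions": "silhouette running away across rooftops", "caption": "The chase begins." }
]`

// downstream code can then parse the reply directly:
const panels = JSON.parse(exampleReply) as Array<{ panel: number; instructions: string; caption: string }>
console.log(panels.length) // 4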
src/app/queries/predictWithHuggingFace.ts
CHANGED

@@ -72,6 +72,11 @@ export async function predict(inputs: string): Promise<string> {
     }
   } catch (err) {
     console.error(`error during generation: ${err}`)
+
+    // a common issue with Llama-2 might be that the model receives too many requests
+    if (`${err}` === "Error: Model is overloaded") {
+      instructions = ``
+    }
   }

   // need to do some cleanup of the garbage the LLM might have gave us