"use client"

import { useEffect, useState, useTransition } from "react"

import { cn } from "@/lib/utils"
import { TopMenu } from "./interface/top-menu"
import { fonts } from "@/lib/fonts"
import { useStore } from "./store"
import { Zoom } from "./interface/zoom"
import { getStory } from "./queries/getStory"
import { BottomBar } from "./interface/bottom-bar"
import { Page } from "./interface/page"
import { LLMResponse } from "@/types"

/**
 * Top-level client component: watches the shared `prompt`/`preset` store state,
 * asks the LLM (via `getStory`) for per-panel instructions + captions, builds the
 * image-generation ("SDXL") prompt strings, and pushes the results back into the
 * store (`setPanels`, `setCaptions`). Falls back to a degraded, repeated-prompt
 * mode when the LLM call fails.
 */
export default function Main() {
  // Transition keeps the async story-generation work off the urgent render path.
  const [_isPending, startTransition] = useTransition()

  // Shared app state (zustand-style selectors). Several of these (font, nbPages,
  // zoomLevel, isGeneratingStory) are presumably consumed by the JSX below this
  // visible chunk — do not remove without checking the render body.
  const isGeneratingStory = useStore(state => state.isGeneratingStory)
  const setGeneratingStory = useStore(state => state.setGeneratingStory)
  const font = useStore(state => state.font)
  const preset = useStore(state => state.preset)
  const prompt = useStore(state => state.prompt)
  const nbPages = useStore(state => state.nbPages)
  const nbTotalPanels = useStore(state => state.nbTotalPanels)
  const setPanels = useStore(state => state.setPanels)
  const setCaptions = useStore(state => state.setCaptions)
  const zoomLevel = useStore(state => state.zoomLevel)

  // Secondary "still working…" UI flag, raised just before panel prompts are assembled.
  const [waitABitMore, setWaitABitMore] = useState(false)

  // react to prompt changes
  useEffect(() => {
    // Empty prompt: nothing to generate.
    if (!prompt) { return }

    startTransition(async () => {
      setWaitABitMore(false)
      setGeneratingStory(true)

      // I don't think we are going to need a rate limiter on the LLM part anymore
      const enableRateLimiter = false // `${process.env.NEXT_PUBLIC_ENABLE_RATE_LIMITER}` === "true"

      let llmResponse: LLMResponse = []

      // Convention: the store prompt is "<style>||<user story>".
      // NOTE(review): if `prompt` contains no "||", `userStoryPrompt` is undefined
      // and `${userStoryPrompt}` below becomes the literal string "undefined",
      // which passes the truthy filter — confirm all writers of `prompt` include
      // the separator, or guard here.
      const [stylePrompt, userStoryPrompt] = prompt.split("||")

      try {
        llmResponse = await getStory({
          preset,
          prompt: [
            `${userStoryPrompt}`,
            stylePrompt ? `in the following context: ${stylePrompt}` : ''
          ].filter(x => x).join(", "),
          nbTotalPanels
        })
        console.log("LLM responded:", llmResponse)
      } catch (err) {
        // Degraded mode: fabricate one entry per panel so the pipeline below
        // still produces nbTotalPanels panels. The trailing dots make each
        // panel's instruction string unique.
        console.log("LLM step failed due to:", err)
        console.log("we are now switching to a degraded mode, using 4 similar panels")
        llmResponse = []
        for (let p = 0; p < nbTotalPanels; p++) {
          llmResponse.push({
            panel: p,
            instructions: `${prompt} ${".".repeat(p)}`,
            caption: "(Sorry, LLM generation failed: using degraded mode)"
          })
        }
        console.error(err)
      }

      // we have to limit the size of the prompt, otherwise the rest of the style won't be followed
      // NOTE(review): 77 looks like the CLIP text-encoder token limit applied to
      // characters — verify the intended unit.
      let limitedStylePrompt = stylePrompt.slice(0, 77)
      if (limitedStylePrompt.length !== stylePrompt.length) {
        console.log("Sorry folks, the style prompt was cut to:", limitedStylePrompt)
      }

      // new experimental prompt: let's drop the user prompt, and only use the style
      const lightPanelPromptPrefix = preset.imagePrompt(limitedStylePrompt).filter(x => x).join(", ")

      // this prompt will be used if the LLM generation failed
      const degradedPanelPromptPrefix = [
        ...preset.imagePrompt(limitedStylePrompt),

        // we re-inject the story, then
        userStoryPrompt,
      ].filter(x => x).join(", ")

      const newPanels: string[] = []
      const newCaptions: string[] = []
      setWaitABitMore(true)
      console.log("Panel prompts for SDXL:")
      for (let p = 0; p < nbTotalPanels; p++) {
        // Missing captions fall back to a placeholder.
        newCaptions.push(llmResponse[p]?.caption || "...")
        const newPanel = [
          // what we do here is that ideally we give full control to the LLM for prompting,
          // unless there was a catastrophic failure, in that case we preserve the original prompt
          llmResponse[p]?.instructions ? lightPanelPromptPrefix : degradedPanelPromptPrefix,
          llmResponse[p]?.instructions || ""
        ].map(chunk => chunk).join(", ") // NOTE(review): `.map(chunk => chunk)` is a no-op
        newPanels.push(newPanel)
        console.log(newPanel)
      }
      setCaptions(newCaptions)
      setPanels(newPanels)

      // Clear the busy flags (after an artificial delay when rate-limiting is on).
      // NOTE(review): this timeout is not cleaned up if the component unmounts —
      // the callbacks would then fire on an unmounted component; consider
      // returning a cleanup from the effect.
      setTimeout(() => {
        setGeneratingStory(false)
        setWaitABitMore(false)
      }, enableRateLimiter ? 12000 : 0)
    })
  }, [prompt, preset?.label, nbTotalPanels]) // important: we need to react to preset changes too

  return (