M96820 committed
Commit ee1540e (unverified)
Parent: f370e63

clean: delete all useless functions

ai-comic-factory/src/app/queries/getLLMEngineFunction.ts CHANGED
@@ -1,19 +1,6 @@
- import { LLMEngine } from "@/types"
- import { predict as predictWithHuggingFace } from "./predictWithHuggingFace"
- import { predict as predictWithOpenAI } from "./predictWithOpenAI"
- import { predict as predictWithGroq } from "./predictWithGroq"
- import { predict as predictWithAnthropic } from "./predictWithAnthropic"
-
- export const defaultLLMEngineName = `${process.env.LLM_ENGINE || ""}` as LLMEngine
-
- export function getLLMEngineFunction(llmEngineName: LLMEngine = defaultLLMEngineName) {
-   const llmEngineFunction =
-     llmEngineName === "GROQ" ? predictWithGroq :
-     llmEngineName === "ANTHROPIC" ? predictWithAnthropic :
-     llmEngineName === "OPENAI" ? predictWithOpenAI :
-     predictWithHuggingFace
-
-   return llmEngineFunction
- }
-
- export const defaultLLMEngineFunction = getLLMEngineFunction()
+ import { LLMEngine } from '@/types';
+
+ import { predict as predictWithHuggingFace } from './predictWithHuggingFace';
+
+ export const defaultLLMEngineName = `${process.env.LLM_ENGINE || ""}` as LLMEngine
+ export const defaultLLMEngineFunction = predictWithHuggingFace
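
Note: with this change the engine is no longer selectable at runtime; every call now resolves to the Hugging Face predict function. A minimal caller sketch, assuming the LLMPredictionFunctionParams shape used elsewhere in this repo (the vendor value shown is an assumption, not from this commit):

import { defaultLLMEngineFunction } from "./getLLMEngineFunction"

// Hypothetical caller: whatever the UI's vendor setting, this now routes to Hugging Face.
async function demo(): Promise<void> {
  const text = await defaultLLMEngineFunction({
    systemPrompt: "You are a comic book author.",
    userPrompt: "Describe the first panel of a detective story.",
    nbMaxNewTokens: 256,
    llmVendorConfig: { vendor: "HUGGINGFACE", apiKey: "", modelId: "" }, // vendor value is an assumption
  })
  console.log(text)
}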
ai-comic-factory/src/app/queries/mockLLMResponse.ts DELETED
@@ -1,51 +0,0 @@
- import { GeneratedPanels } from "@/types"
-
- export const mockGeneratedPanels: GeneratedPanels = [{
-   "panel": 1,
-   "instructions": "wide shot of detective walking towards a UFO crash site",
-   "speech": "Hmm.. interesting.",
-   "caption": "Detective Jameson investigates a UFO crash in the desert"
- },
- {
-   "panel": 2,
-   "instructions": "close-up of detective's face, determined expression",
-   "speech": "I've been tracking this case for weeks",
-   "caption": "He's been tracking this case for weeks"
- },
- {
-   "panel": 3,
-   "instructions": "medium shot of detective examining UFO debris",
-   "speech": "...",
-   "caption": "The evidence is scattered all over the desert"
- },
- {
-   "panel": 4,
-   "instructions": "close-up of strange symbol on UFO debris",
-   "speech": " what does this symbol mean?",
-   "caption": "strange symbols"
- },
- {
-   "panel": 5,
-   "instructions": "wide shot of detective walking towards a strange rock formation",
-   "speech": "I've been tracking this case for weeks",
-   "caption": "Jameson follows a trail that leads him deeper into the desert"
- },
- {
-   "panel": 6,
-   "instructions": "medium shot of detective discovering an alien body",
-   "speech": "I'm not alone in the desert",
-   "caption": "He's not alone"
- },
- {
-   "panel": 7,
-   "instructions": "close-up of alien's face, eyes closed, peaceful expression",
-   "speech": "...?",
-   "caption": "An alien life form, deceased"
- },
- {
-   "panel": 8,
-   "instructions": "wide shot of detective standing over the alien body, looking up at the sky",
-   "speech": "what other secrets lie beyond the stars?",
-   "caption": "Jameson wonders"
- }
- ]
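
Note: the deleted mock was typed as GeneratedPanels from "@/types". A sketch of that shape, as inferred from the mock data above (the authoritative definition lives in the repo's types module):

// Inferred from the deleted mock data — see "@/types" for the real definition.
type GeneratedPanel = {
  panel: number        // 1-based panel index
  instructions: string // image-generation directions for the panel
  speech: string       // dialogue for the speech bubble
  caption: string      // narration text shown with the panel
}
type GeneratedPanels = GeneratedPanel[]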
ai-comic-factory/src/app/queries/predict.ts CHANGED
@@ -1,23 +1,16 @@
  "use server"

- import { LLMEngine, LLMPredictionFunctionParams } from "@/types"
- import { defaultLLMEngineName, getLLMEngineFunction } from "./getLLMEngineFunction"
+ import { LLMPredictionFunctionParams } from '@/types';
+
+ import { defaultLLMEngineFunction } from './getLLMEngineFunction';

- export async function predict(params: LLMPredictionFunctionParams): Promise<string> {
-   const { llmVendorConfig: { vendor } } = params
-   // LLMVendor = what the user configure in the UI (eg. a dropdown item called default server)
-   // LLMEngine = the actual engine to use (eg. hugging face)
-   const llmEngineName: LLMEngine =
-     vendor === "ANTHROPIC" ? "ANTHROPIC" :
-     vendor === "GROQ" ? "GROQ" :
-     vendor === "OPENAI" ? "OPENAI" :
-     defaultLLMEngineName
-
-   const llmEngineFunction = getLLMEngineFunction(llmEngineName)
+ export async function predict(params: LLMPredictionFunctionParams): Promise<string> {
+   // const llmEngineName = defaultLLMEngineName
+   const llmEngineFunction = defaultLLMEngineFunction

    // console.log("predict: using " + llmEngineName)
    const results = await llmEngineFunction(params)

    // console.log("predict: result: " + results)
    return results
- }
+ }
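
Note: predict keeps its public signature, so call sites are unaffected; only the vendor routing is gone. A hypothetical invocation (names and values are illustrative, not from this commit):

import { predict } from "./predict"

// Hypothetical call site, e.g. inside a server action or route handler.
async function generate(): Promise<string> {
  return predict({
    systemPrompt: "You are a comic book author.",
    userPrompt: "Write captions for two panels about a UFO crash in the desert.",
    nbMaxNewTokens: 512,
    // still accepted, but after this commit the vendor no longer changes the engine
    llmVendorConfig: { vendor: "OPENAI", apiKey: "", modelId: "" },
  })
}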
ai-comic-factory/src/app/queries/predictWithAnthropic.ts DELETED
@@ -1,48 +0,0 @@
- "use server"
-
- import { LLMPredictionFunctionParams } from '@/types';
- import Anthropic from '@anthropic-ai/sdk';
- import { MessageParam } from '@anthropic-ai/sdk/resources';
-
- export async function predict({
-   systemPrompt,
-   userPrompt,
-   nbMaxNewTokens,
-   llmVendorConfig
- }: LLMPredictionFunctionParams): Promise<string> {
-   const anthropicApiKey = `${
-     llmVendorConfig.apiKey ||
-     process.env.AUTH_ANTHROPIC_API_KEY ||
-     ""
-   }`
-   const anthropicApiModel = `${
-     llmVendorConfig.modelId ||
-     process.env.LLM_ANTHROPIC_API_MODEL ||
-     "claude-3-opus-20240229"
-   }`
-   if (!anthropicApiKey) { throw new Error(`cannot call Anthropic without an API key`) }
-
-   const anthropic = new Anthropic({
-     apiKey: anthropicApiKey,
-   })
-
-   const messages: MessageParam[] = [
-     { role: "user", content: userPrompt },
-   ]
-
-   try {
-     const res = await anthropic.messages.create({
-       messages: messages,
-       // stream: false,
-       system: systemPrompt,
-       model: anthropicApiModel,
-       // temperature: 0.8,
-       max_tokens: nbMaxNewTokens,
-     })
-
-     return (res.content[0] as any)?.text || ""
-   } catch (err) {
-     console.error(`error during generation: ${err}`)
-     return ""
-   }
- }
ai-comic-factory/src/app/queries/predictWithGroq.ts DELETED
@@ -1,48 +0,0 @@
- "use server"
-
- import { LLMPredictionFunctionParams } from "@/types"
- import Groq from "groq-sdk"
-
- export async function predict({
-   systemPrompt,
-   userPrompt,
-   nbMaxNewTokens,
-   llmVendorConfig
- }: LLMPredictionFunctionParams): Promise<string> {
-   const groqApiKey = `${
-     llmVendorConfig.apiKey ||
-     process.env.AUTH_GROQ_API_KEY ||
-     ""
-   }`
-   const groqApiModel = `${
-     llmVendorConfig.modelId ||
-     process.env.LLM_GROQ_API_MODEL ||
-     "mixtral-8x7b-32768"
-   }`
-
-   if (!groqApiKey) { throw new Error(`cannot call Groq without an API key`) }
-
-   const groq = new Groq({
-     apiKey: groqApiKey,
-   })
-
-   const messages: Groq.Chat.Completions.CompletionCreateParams.Message[] = [
-     { role: "system", content: systemPrompt },
-     { role: "user", content: userPrompt },
-   ]
-
-   try {
-     const res = await groq.chat.completions.create({
-       messages: messages,
-       model: groqApiModel,
-       stream: false,
-       temperature: 0.5,
-       max_tokens: nbMaxNewTokens,
-     })
-
-     return res.choices[0].message.content || ""
-   } catch (err) {
-     console.error(`error during generation: ${err}`)
-     return ""
-   }
- }
ai-comic-factory/src/app/queries/predictWithOpenAI.ts DELETED
@@ -1,55 +0,0 @@
- "use server"
-
- import type { ChatCompletionMessageParam } from "openai/resources/chat"
- import OpenAI from "openai"
- import { LLMPredictionFunctionParams } from "@/types"
-
- export async function predict({
-   systemPrompt,
-   userPrompt,
-   nbMaxNewTokens,
-   llmVendorConfig
- }: LLMPredictionFunctionParams): Promise<string> {
-   const openaiApiKey = `${
-     llmVendorConfig.apiKey ||
-     process.env.AUTH_OPENAI_API_KEY ||
-     ""
-   }`
-   const openaiApiModel = `${
-     llmVendorConfig.modelId ||
-     process.env.LLM_OPENAI_API_MODEL ||
-     "gpt-4-turbo"
-   }`
-
-   if (!openaiApiKey) { throw new Error(`cannot call OpenAI without an API key`) }
-
-   const openaiApiBaseUrl = `${process.env.LLM_OPENAI_API_BASE_URL || "https://api.openai.com/v1"}`
-
-   const openai = new OpenAI({
-     apiKey: openaiApiKey,
-     baseURL: openaiApiBaseUrl,
-   })
-
-   const messages: ChatCompletionMessageParam[] = [
-     { role: "system", content: systemPrompt },
-     { role: "user", content: userPrompt },
-   ]
-
-   try {
-     const res = await openai.chat.completions.create({
-       messages: messages,
-       stream: false,
-       model: openaiApiModel,
-       temperature: 0.8,
-       max_tokens: nbMaxNewTokens,
-
-       // TODO: use the nbPanels to define a max token limit
-     })
-
-     return res.choices[0].message.content || ""
-   } catch (err) {
-     console.error(`error during generation: ${err}`)
-     return ""
-   }
- }
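
Note: the three deleted vendor modules followed one contract: resolve the API key and model from the per-request config or server env vars, throw if the key is missing, call the vendor SDK, and degrade to an empty string on failure. A condensed sketch of that shared pattern (hypothetical helper and env var names, not from the repo):

"use server"

import { LLMPredictionFunctionParams } from "@/types"

// Hypothetical generic wrapper illustrating the contract the deleted modules shared.
export async function predictWithVendor(
  { systemPrompt, userPrompt, nbMaxNewTokens, llmVendorConfig }: LLMPredictionFunctionParams,
  callVendor: (args: { system: string; user: string; model: string; maxTokens: number }) => Promise<string>
): Promise<string> {
  // per-request config wins over server-side env defaults
  const apiKey = `${llmVendorConfig.apiKey || process.env.AUTH_VENDOR_API_KEY || ""}`  // env var name is illustrative
  const model = `${llmVendorConfig.modelId || process.env.LLM_VENDOR_API_MODEL || ""}` // env var name is illustrative
  if (!apiKey) { throw new Error(`cannot call the vendor without an API key`) }

  try {
    return await callVendor({ system: systemPrompt, user: userPrompt, model, maxTokens: nbMaxNewTokens })
  } catch (err) {
    // all three modules logged and returned "" rather than rethrowing
    console.error(`error during generation: ${err}`)
    return ""
  }
}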