import { LLM_SUMMERIZATION } from "$env/static/private";

import { generateFromDefaultEndpoint } from "$lib/server/generateFromDefaultEndpoint";
import type { Message } from "$lib/types/Message";

/**
 * Summarizes a user prompt into a short, emoji-prefixed title.
 * Falls back to the first few words of the prompt when summarization is disabled.
 */
export async function summarize(prompt: string) {
	// Summarization disabled: return the first five words of the prompt as the title.
	if (!LLM_SUMMERIZATION) {
		return prompt.split(/\s+/g).slice(0, 5).join(" ");
	}

	// Few-shot examples showing the expected "<emoji> <short summary>" format.
	const messages: Array<Omit<Message, "id">> = [
		{ from: "user", content: "Who is the president of Gabon?" },
		{ from: "assistant", content: "🇬🇦 President of Gabon" },
		{ from: "user", content: "Who is Julien Chaumond?" },
		{ from: "assistant", content: "🧑 Julien Chaumond" },
		{ from: "user", content: "what is 1 + 1?" },
		{ from: "assistant", content: "🔢 Simple math operation" },
		{ from: "user", content: "What are the latest news?" },
		{ from: "assistant", content: "📰 Latest news" },
		{ from: "user", content: "How to make a great cheesecake?" },
		{ from: "assistant", content: "🍰 Cheesecake recipe" },
		{ from: "user", content: "what is your favorite movie? do a short answer." },
		{ from: "assistant", content: "🎥 Favorite movie" },
		{ from: "user", content: "Explain the concept of artificial intelligence in one sentence" },
		{ from: "assistant", content: "🤖 AI definition" },
		{ from: "user", content: prompt },
	];

	return await generateFromDefaultEndpoint({
		messages,
		preprompt: `You are a summarization AI. You'll never answer a user's question directly, but instead summarize the user's request into a single short sentence of four words or less. Always start your answer with an emoji relevant to the summary.`,
	})
		.then((summary) => {
			// If the model did not start its answer with an emoji, prepend a generic one.
			if (!/\p{Emoji}/u.test(summary.slice(0, 3))) {
				return "💬 " + summary;
			}
			return summary;
		})
		.catch((e) => {
			console.error(e);
			return null;
		});
}