V.0.3.1.8 π Bugfixes (#37)
* Updated Packages
* Bugfix: Check if requests exist before deleting
* Bugfix: Update Collections not working
  Changed PUT to POST request, moved to sub API route
- backend/backend/app/api/routers/chat.py +4 -4
- backend/backend/app/api/routers/query.py +1 -1
- backend/backend/app/api/routers/search.py +2 -2
- backend/backend/app/utils/contants.py +3 -3
- backend/backend/app/utils/index.py +14 -11
- backend/backend/app/utils/prompt_template.py +1 -1
- backend/poetry.lock +0 -0
- backend/pyproject.toml +4 -2
- frontend/app/api/admin/collections/route.ts +0 -41
- frontend/app/api/admin/collections/update/route.ts +60 -0
- frontend/app/components/ui/admin/admin-manage-collections.tsx +4 -4
- frontend/package-lock.json +18 -18
backend/backend/app/api/routers/chat.py
CHANGED
@@ -4,10 +4,10 @@ from typing import List
 from fastapi import APIRouter, Depends, HTTPException, Request, status
 from fastapi.responses import StreamingResponse
 from fastapi.websockets import WebSocketDisconnect
-from llama_index.llms import ChatMessage
-from llama_index.llms import MessageRole
-from llama_index.memory import ChatMemoryBuffer
-from llama_index.prompts import PromptTemplate
+from llama_index.core.llms import ChatMessage
+from llama_index.core.llms import MessageRole
+from llama_index.core.memory import ChatMemoryBuffer
+from llama_index.core import PromptTemplate
 from pydantic import BaseModel

 from backend.app.utils import auth
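This migration tracks the llama-index 0.10 reorganization, where the core package moved under the llama_index.core namespace. A minimal sketch of the new imports in use (illustrative only, not code from this repo):

# Sketch: llama-index >= 0.10 namespace, matching the chat router's imports.
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.memory import ChatMemoryBuffer

# Build a short chat history and a bounded memory buffer.
history = [
    ChatMessage(role=MessageRole.USER, content="Summarize the indexed docs."),
]
memory = ChatMemoryBuffer.from_defaults(chat_history=history, token_limit=3072)
print(memory.get())  # messages trimmed to fit the token limit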
backend/backend/app/api/routers/query.py
CHANGED
@@ -4,7 +4,7 @@ from typing import List
 from fastapi import APIRouter, Depends, HTTPException, Request, status
 from fastapi.responses import StreamingResponse
 from fastapi.websockets import WebSocketDisconnect
-from llama_index.llms import MessageRole
+from llama_index.core.llms import MessageRole
 from pydantic import BaseModel

 from backend.app.utils import auth
backend/backend/app/api/routers/search.py
CHANGED
@@ -2,8 +2,8 @@ import logging
 import re

 from fastapi import APIRouter, Depends, HTTPException, status
-from llama_index.postprocessor import SimilarityPostprocessor
-from llama_index.retrievers import VectorIndexRetriever
+from llama_index.core.postprocessor import SimilarityPostprocessor
+from llama_index.core.retrievers import VectorIndexRetriever

 from backend.app.utils import auth
 from backend.app.utils.index import get_index
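For reference, the two renamed classes are typically wired together like this; a hedged sketch assuming an already-built index, not the repo's actual search handler:

# Sketch: retrieve top-k nodes, then drop weak matches (assumed parameters).
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.retrievers import VectorIndexRetriever

def search(index, query: str, top_k: int = 5, cutoff: float = 0.5):
    retriever = VectorIndexRetriever(index=index, similarity_top_k=top_k)
    nodes = retriever.retrieve(query)  # list of NodeWithScore
    # Filter out results scoring below the similarity cutoff.
    return SimilarityPostprocessor(similarity_cutoff=cutoff).postprocess_nodes(nodes)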
backend/backend/app/utils/contants.py
CHANGED
@@ -29,7 +29,7 @@ LLM_TEMPERATURE = 0.1
 MODEL_KWARGS = {"n_gpu_layers": 100} if DEVICE_TYPE == "cuda" else {}

 # Service Context Constants
-CHUNK_SIZE =
+CHUNK_SIZE = 1024
 CHUNK_OVERLAP = 100

 # Embedding Model Constants
@@ -37,12 +37,12 @@ EMBED_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
 EMBED_POOLING = "mean"
 EMBED_MODEL_DIMENSIONS = 384  # MiniLM-L6-v2 uses 384 dimensions
 DEF_EMBED_MODEL_DIMENSIONS = (
-    1536  # Default embedding model dimensions used by OpenAI text-embedding-
+    1536  # Default embedding model dimensions used by OpenAI text-embedding-3-small
 )
 EMBED_BATCH_SIZE = 64  # batch size for openai embeddings

 # Chat Memory Buffer Constants
-MEMORY_TOKEN_LIMIT =
+MEMORY_TOKEN_LIMIT = 3072 if USE_LOCAL_LLM else 6144

 # Prompt Helper Constants
 # set maximum input size
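The new MEMORY_TOKEN_LIMIT feeds the chat memory buffer; a sketch of the assumed wiring (USE_LOCAL_LLM is this repo's flag, the rest mirrors llama-index defaults):

# Sketch: smaller memory budget for local models, larger for hosted ones.
from llama_index.core.memory import ChatMemoryBuffer

USE_LOCAL_LLM = True  # assumption for the example
MEMORY_TOKEN_LIMIT = 3072 if USE_LOCAL_LLM else 6144

# Local models have smaller context windows, so the buffer is capped lower.
memory = ChatMemoryBuffer.from_defaults(token_limit=MEMORY_TOKEN_LIMIT)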
backend/backend/app/utils/index.py
CHANGED
@@ -2,23 +2,23 @@ import logging
 import os

 from dotenv import load_dotenv
-from llama_index import (
+from llama_index.core import (
     PromptHelper,
     ServiceContext,
-    # Document,
     SimpleDirectoryReader,
     StorageContext,
     VectorStoreIndex,
     load_index_from_storage,
     set_global_service_context,
 )
-from llama_index.embeddings import OpenAIEmbedding
-from llama_index.embeddings.huggingface import HuggingFaceEmbedding
-from llama_index.llms import LlamaCPP, OpenAI
-from llama_index.llms.llama_utils import (
+from llama_index.embeddings.openai import OpenAIEmbedding, OpenAIEmbeddingModelType
+from llama_index.legacy.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.legacy.llms.llama_utils import (
     completion_to_prompt,
     messages_to_prompt,
 )
+from llama_index.llms.llama_cpp import LlamaCPP
+from llama_index.llms.openai import OpenAI
 from llama_index.vector_stores.supabase import SupabaseVectorStore
 from vecs import IndexMeasure

@@ -45,7 +45,6 @@ from backend.app.utils.contants import (
     USE_LOCAL_VECTOR_STORE,
 )

-# from llama_index.vector_stores.supabase import SupabaseVectorStore
 # import textwrap

 load_dotenv()
@@ -98,7 +97,11 @@ else:
         api_key=os.getenv("OPENAI_API_KEY"),
     )
     # By default, LlamaIndex uses text-embedding-ada-002 from OpenAI
-
+    # Set the model to text-embed-3-small for better performance and cheaper cost
+    embed_model = OpenAIEmbedding(
+        model=OpenAIEmbeddingModelType.TEXT_EMBED_3_SMALL,
+        embed_batch_size=EMBED_BATCH_SIZE,
+    )

     prompt_helper = PromptHelper(
         chunk_size_limit=CHUNK_SIZE_LIMIT,
@@ -149,11 +152,11 @@ def create_index():
             show_progress=True,
         )
         # store it for later
-        index.storage_context.persist(
-        logger.info(f"Finished creating new index. Stored in {
+        index.storage_context.persist(new_storage_dir)
+        logger.info(f"Finished creating new index. Stored in {new_storage_dir}")
     else:
         # do nothing
-        logger.info(f"Index already exist at {
+        logger.info(f"Index already exist at {new_storage_dir}...")
     # else, create & store the index in Supabase pgvector
 else:
     # get the folders in the data directory
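The embedding switch is the functional core of this hunk; a standalone sketch (assumes OPENAI_API_KEY is set in the environment; batch size mirrors EMBED_BATCH_SIZE above):

# Sketch: text-embedding-3-small via llama-index (1536-dim, cheaper than ada-002).
from llama_index.embeddings.openai import OpenAIEmbedding, OpenAIEmbeddingModelType

embed_model = OpenAIEmbedding(
    model=OpenAIEmbeddingModelType.TEXT_EMBED_3_SMALL,
    embed_batch_size=64,  # EMBED_BATCH_SIZE in this repo's constants
)
vector = embed_model.get_text_embedding("hello world")
print(len(vector))  # 1536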
backend/backend/app/utils/prompt_template.py
CHANGED
@@ -4,7 +4,7 @@ Modify the prompt template based on the model you select.
 This seems to have significant impact on the output of the LLM.
 """

-from llama_index.prompts import PromptTemplate
+from llama_index.core import PromptTemplate

 # this is specific to Llama-2.
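PromptTemplate itself is unchanged by the move; a generic usage sketch (not this repo's Llama-2 template):

# Sketch: format a prompt template under the new core namespace.
from llama_index.core import PromptTemplate

qa_template = PromptTemplate("Context:\n{context}\n\nQuestion: {query}\nAnswer:")
print(qa_template.format(context="llama-index 0.10 release notes", query="What moved?"))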
backend/poetry.lock
CHANGED
The diff for this file is too large to render.
backend/pyproject.toml
CHANGED
@@ -10,8 +10,7 @@ packages = [{ include = "backend" }]
 python = "^3.11,<3.12"
 fastapi = "^0.109.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
-
-pypdf = "^3.17.4"
+pypdf = "^4.3.0"
 python-dotenv = "^1.0.0"
 llama-cpp-python = "^0.2.52"
 transformers = "^4.38.1"
@@ -22,6 +21,9 @@ pyjwt = "^2.8.0"
 vecs = "^0.4.3"
 python-multipart = "^0.0.9"
 asyncpg = "^0.29.0"
+llama-index = "^0.10.55"
+llama-index-vector-stores-supabase = "^0.1.5"
+llama-index-llms-llama-cpp = "^0.1.4"

 [tool.poetry.group.dev]
 optional = true
frontend/app/api/admin/collections/route.ts
CHANGED
@@ -38,44 +38,3 @@ export async function GET(request: NextRequest) {

   return NextResponse.json({ collections: collections });
 }
-
-// PUT request to update the collection data in the database
-export async function PUT(request: NextRequest) {
-  // Create a new Supabase client
-  const supabase = createClient(
-    process.env.SUPABASE_URL ?? '',
-    process.env.SUPABASE_SERVICE_ROLE_KEY ?? '',
-    { db: { schema: 'public' } },
-  );
-
-  // Retrieve the collection ID from the request body
-  const { collection_id, is_public } = await request.json();
-
-  // Update the collection data in the database
-  const { data: updateData, error: updateError } = await supabase
-    .from('collections')
-    .update({ is_public: is_public })
-    .eq('collection_id', collection_id);
-
-  if (updateError) {
-    console.error('Error updating collection data in database:', updateError.message);
-    return NextResponse.json({ error: updateError.message }, { status: 500 });
-  }
-
-  // console.log('Updated collection:', data);
-
-  // Delete the collection requests data in the database (Since it is manually updated by Admin)
-  const { data: delData, error: delError } = await supabase
-    .from('collections_requests')
-    .delete()
-    .eq('collection_id', collection_id);
-
-  if (delError) {
-    console.error('Error deleting collection requests data in database:', delError.message);
-    return NextResponse.json({ error: delError.message }, { status: 500 });
-  }
-
-  // console.log('Deleted collection requests:', delData);
-
-  return NextResponse.json({ message: 'Collection updated successfully' });
-}
frontend/app/api/admin/collections/update/route.ts
ADDED
@@ -0,0 +1,60 @@
+export const revalidate = 10;
+
+import { createClient } from '@supabase/supabase-js';
+import { NextRequest, NextResponse } from "next/server";
+
+// POST request to update the collection data in the database
+export async function POST(request: NextRequest) {
+  // Create a new Supabase client
+  const supabase = createClient(
+    process.env.SUPABASE_URL ?? '',
+    process.env.SUPABASE_SERVICE_ROLE_KEY ?? '',
+    { db: { schema: 'public' } },
+  );
+
+  // Retrieve the collection ID from the request body
+  const { collection_id, is_public } = await request.json();
+
+  // Update the collection data in the database
+  const { data: updateData, error: updateError } = await supabase
+    .from('collections')
+    .update({ is_public: is_public })
+    .eq('collection_id', collection_id);
+
+  if (updateError) {
+    console.error('Error updating collection data in database:', updateError.message);
+    return NextResponse.json({ error: updateError.message }, { status: 500 });
+  }
+
+  // console.log('Updated collection:', updateData);
+
+  // Check if there is an existing collection request for the collection
+  const { data: collReq, error: collReqError } = await supabase
+    .from('collections_requests')
+    .select('collection_id')
+    .eq('collection_id', collection_id);
+
+  if (collReqError) {
+    console.error('Error fetching collection requests data from database:', collReqError.message);
+    return NextResponse.json({ error: collReqError.message }, { status: 500 });
+  }
+
+  // console.log('Collection requests:', collReq);
+
+  // If there is an existing collection request, delete it
+  if (collReq.length === 1) {
+    const { data: delData, error: delError } = await supabase
+      .from('collections_requests')
+      .delete()
+      .eq('collection_id', collection_id);
+
+    if (delError) {
+      console.error('Error deleting collection requests data in database:', delError.message);
+      return NextResponse.json({ error: delError.message }, { status: 500 });
+    }
+
+    // console.log('Deleted collection requests:', delData);
+  }
+
+  return NextResponse.json({ message: 'Collection updated successfully' });
+}
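For testing, the new endpoint can be exercised with a plain HTTP client; a sketch assuming a local dev server on port 3000 and no auth middleware in the way (both assumptions, not part of the repo):

# Sketch: call the new POST /api/admin/collections/update route.
import requests  # assumed available; any HTTP client works

resp = requests.post(
    "http://localhost:3000/api/admin/collections/update",
    json={"collection_id": "123", "is_public": True},  # hypothetical collection_id
)
print(resp.status_code, resp.json())  # expect: Collection updated successfully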
frontend/app/components/ui/admin/admin-manage-collections.tsx
CHANGED
@@ -70,8 +70,8 @@ export default function AdminManageCollections() {
     }).then((result) => {
       if (result.isConfirmed) {
         // if user confirms, send request to server
-        fetch(`/api/admin/collections`, {
-          method: 'PUT',
+        fetch(`/api/admin/collections/update`, {
+          method: 'POST',
           headers: {
             'Content-Type': 'application/json',
           },
@@ -131,8 +131,8 @@ export default function AdminManageCollections() {
     }).then((result) => {
       if (result.isConfirmed) {
         // if user confirms, send request to server
-        fetch(`/api/admin/collections`, {
-          method: 'PUT',
+        fetch(`/api/admin/collections/update`, {
+          method: 'POST',
           headers: {
             'Content-Type': 'application/json',
           },
frontend/package-lock.json
CHANGED
@@ -1,12 +1,12 @@
 {
   "name": "smart-retrieval",
-  "version": "0.
+  "version": "0.3.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "smart-retrieval",
-      "version": "0.
+      "version": "0.3.0",
       "dependencies": {
         "@auth/supabase-adapter": "^0.6.0",
         "@nextui-org/react": "^2.2.9",
@@ -4232,11 +4232,11 @@
       }
     },
     "node_modules/braces": {
-      "version": "3.0.
-      "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.
-      "integrity": "sha512-
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+      "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
       "dependencies": {
-        "fill-range": "^7.
+        "fill-range": "^7.1.1"
       },
       "engines": {
         "node": ">=8"
@@ -5688,9 +5688,9 @@
       "peer": true
     },
     "node_modules/fast-loops": {
-      "version": "1.1.
-      "resolved": "https://registry.npmjs.org/fast-loops/-/fast-loops-1.1.
-      "integrity": "sha512-
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/fast-loops/-/fast-loops-1.1.4.tgz",
+      "integrity": "sha512-8dbd3XWoKCTms18ize6JmQF1SFnnfj5s0B7rRry22EofgMu7B6LKHVh+XfFqFGsqnbH54xgeO83PzpKI+ODhlg=="
     },
     "node_modules/fast-shallow-equal": {
       "version": "1.0.0",
@@ -5735,9 +5735,9 @@
       }
     },
     "node_modules/fill-range": {
-      "version": "7.
-      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.
-      "integrity": "sha512-
+      "version": "7.1.1",
+      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+      "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
       "dependencies": {
         "to-regex-range": "^5.0.1"
       },
@@ -12758,9 +12758,9 @@
       "peer": true
     },
     "node_modules/sweetalert2": {
-      "version": "11.
-      "resolved": "https://registry.npmjs.org/sweetalert2/-/sweetalert2-11.
-      "integrity": "sha512-
+      "version": "11.12.2",
+      "resolved": "https://registry.npmjs.org/sweetalert2/-/sweetalert2-11.12.2.tgz",
+      "integrity": "sha512-Rwv5iRYlApkDSXeX22aLhhWMlWPzFxnNBVLZajkFKYhaVEfQkMOPQQRhBtSFxKBPCoko9U3SccWm9hI4o3Id0Q==",
       "funding": {
         "type": "individual",
         "url": "https://github.com/sponsors/limonte"
@@ -13691,9 +13691,9 @@
       "peer": true
     },
     "node_modules/ws": {
-      "version": "8.
-      "resolved": "https://registry.npmjs.org/ws/-/ws-8.
-      "integrity": "sha512-
+      "version": "8.18.0",
+      "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
+      "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
      "engines": {
        "node": ">=10.0.0"
      },