Germano Cavalcante
committed on
Commit · 35b7e57
1 Parent(s): 8c64fd6
Tools: find_related, wiki_search, gpu_checker, bpy_doc
- .gitignore +3 -0
- Dockerfile +20 -0
- README.md +3 -4
- __init__.py +0 -0
- config.py +14 -0
- main.py +58 -0
- requirements-fastapi.txt +7 -0
- routers/__init__.py +1 -0
- routers/rag/__init__.py +226 -0
- routers/rag/embeddings_dev_docs.pkl +3 -0
- routers/rag/embeddings_issues.pkl +3 -0
- routers/rag/embeddings_manual.pkl +3 -0
- routers/tool_bpy_doc.py +65 -0
- routers/tool_bpy_doc_v4_1.pkl +3 -0
- routers/tool_calls.py +116 -0
- routers/tool_find_related.py +296 -0
- routers/tool_gpu_checker.py +213 -0
- routers/tool_wiki_search.py +201 -0
- routers/utils_gitea.py +126 -0
- static/favicon.ico +0 -0
- static/privace.txt +35 -0
- utils/generate_bpy_doc.py +224 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
+.vs
+__pycache__/
+routers/cache
Dockerfile
ADDED
@@ -0,0 +1,20 @@
+FROM python:3.11-slim
+
+WORKDIR /code
+
+COPY requirements-fastapi.txt ./
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements-fastapi.txt
+
+RUN useradd -m -u 1000 user
+
+USER user
+
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+COPY --chown=user . $HOME/app/
+
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md
CHANGED
@@ -1,12 +1,11 @@
 ---
 title: Tools
-emoji:
-colorFrom:
-colorTo:
+emoji: 🌍
+colorFrom: blue
+colorTo: purple
 sdk: docker
 pinned: false
 license: apache-2.0
-short_description: Tools
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__init__.py
ADDED
File without changes
config.py
ADDED
@@ -0,0 +1,14 @@
+from pydantic_settings import BaseSettings
+import os
+
+
+class Settings(BaseSettings):
+    huggingface_key: str = os.environ.get("huggingface_key")
+    OPENAI_API_KEY: str = os.environ.get("OPENAI_API_KEY")
+    embedding_api: str = "sbert"
+    embedding_model: str = "mano-wii/BAAI_bge-base-en-v1.5-tunned-for-blender-issues"
+    # embedding_api: str = "openai"
+    # embedding_model: str = "text-embedding-ada-002"
+
+
+settings = Settings()
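Note: `Settings` reads its values eagerly with `os.environ.get`, but pydantic-settings can also resolve the same fields directly from the environment (the lookup is case-insensitive by default). A minimal sketch of that alternative, assuming the same variable names are exported; `SettingsSketch` is illustrative and not part of this commit:

```python
from pydantic_settings import BaseSettings


class SettingsSketch(BaseSettings):
    # pydantic-settings looks each field name up in the environment,
    # so "huggingface_key" and "OPENAI_API_KEY" resolve without os.environ.get.
    huggingface_key: str
    OPENAI_API_KEY: str
    embedding_api: str = "sbert"
    embedding_model: str = "mano-wii/BAAI_bge-base-en-v1.5-tunned-for-blender-issues"

# SettingsSketch() raises a ValidationError at startup if a variable is
# missing, instead of silently storing None.
```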
main.py
ADDED
@@ -0,0 +1,58 @@
+# main.py
+
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import HTMLResponse
+from fastapi.staticfiles import StaticFiles
+from huggingface_hub import login
+from config import settings
+from routers import tool_bpy_doc, tool_gpu_checker, tool_find_related, tool_wiki_search, tool_calls
+
+login(settings.huggingface_key)
+
+app = FastAPI(openapi_url="/api/v1/openapi.json",
+              docs_url="/api/v1/docs")
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["https://projects.blender.org"],
+    allow_methods=["GET", "POST"],
+    allow_headers=["Authorization", "Content-Type"],
+    allow_credentials=True,
+)
+
+app.include_router(
+    tool_bpy_doc.router, prefix="/api/v1", tags=["Tools"])
+
+app.include_router(
+    tool_gpu_checker.router, prefix="/api/v1", tags=["Tools"])
+
+app.include_router(
+    tool_find_related.router, prefix="/api/v1", tags=["Tools"])
+
+app.include_router(
+    tool_wiki_search.router, prefix="/api/v1", tags=["Tools"])
+
+app.include_router(
+    tool_calls.router, prefix="/api/v1", tags=["Function Calls"])
+
+
+@app.get("/", response_class=HTMLResponse)
+async def root():
+    return """
+    <!DOCTYPE html>
+    <html>
+        <head>
+            <title>My Endpoints</title>
+        </head>
+        <body>
+            <h1>Welcome to @mano-wii API</h1>
+            <p>Click the button below to access the documentation:</p>
+            <a href="/api/v1/docs" style="text-decoration: none;">
+                <button style="padding: 10px 20px; background-color: #007BFF; color: #fff; border: none; cursor: pointer;">Go to Documentation</button>
+            </a>
+        </body>
+    </html>
+    """
+
+app.mount("/api/v1/static", StaticFiles(directory="static"), name="static")
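Note: with the image built from the Dockerfile above, uvicorn serves the app on port 7860, so the tool routes can be exercised over plain HTTP. A minimal sketch using `requests`; the host and port are assumptions taken from the CMD line, and the query values are only examples:

```python
import requests

BASE = "http://localhost:7860/api/v1"  # host/port assumed from the Dockerfile CMD

# Each tool route returns PlainTextResponse, so .text holds the tool output
print(requests.get(f"{BASE}/bpy_doc", params={"api": "bpy.context"}).text)
print(requests.get(f"{BASE}/gpu_checker", params={"gpu_info": "GeForce GTX 1050"}).text)
print(requests.get(f"{BASE}/wiki_search", params={"query": "Set Snap Base"}).text)
```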
requirements-fastapi.txt
ADDED
@@ -0,0 +1,7 @@
+fastapi
+uvicorn[standard]
+python-multipart
+pydantic-settings
+huggingface_hub
+sentence_transformers
+openai
routers/__init__.py
ADDED
@@ -0,0 +1 @@
+# __init__.py
routers/rag/__init__.py
ADDED
@@ -0,0 +1,226 @@
+# routers/embedding/__init__.py
+
+import os
+import re
+import sys
+import threading
+import torch
+from sentence_transformers import SentenceTransformer, util
+from typing import Dict, List, Tuple, Set, LiteralString
+
+
+class EmbeddingContext:
+    # These don't change
+    TOKEN_LEN_MAX_FOR_EMBEDDING = 512
+
+    # Set when creating the object
+    lock = None
+    model = None
+    openai_client = None
+    model_name = ''
+    config_type = ''
+    embedding_shape = None
+    embedding_dtype = None
+    embedding_device = None
+
+    # Updates constantly
+    data = {}
+
+    def __init__(self):
+        try:
+            from config import settings
+        except:
+            sys.path.append(os.path.abspath(
+                os.path.join(os.path.dirname(__file__), '../..')))
+            from config import settings
+
+        self.lock = threading.Lock()
+        config_type = settings.embedding_api
+        model_name = settings.embedding_model
+
+        if config_type == 'sbert':
+            self.model = SentenceTransformer(model_name, use_auth_token=False)
+            self.model.max_seq_length = self.TOKEN_LEN_MAX_FOR_EMBEDDING
+            print("Max Sequence Length:", self.model.max_seq_length)
+
+            self.encode = self.encode_sbert
+            if torch.cuda.is_available():
+                self.model = self.model.to('cuda')
+
+        elif config_type == 'openai':
+            from openai import OpenAI
+            self.openai_client = OpenAI(
+                # base_url = settings.openai_api_base
+                api_key=settings.OPENAI_API_KEY,
+            )
+            self.encode = self.encode_openai
+
+        self.model_name = model_name
+        self.config_type = config_type
+
+        tmp = self.encode(['tmp'])
+        self.embedding_shape = tmp.shape[1:]
+        self.embedding_dtype = tmp.dtype
+        self.embedding_device = tmp.device
+
+    def encode(self, texts_to_embed):
+        pass
+
+    def encode_sbert(self, texts_to_embed):
+        return self.model.encode(texts_to_embed, show_progress_bar=True, convert_to_tensor=True, normalize_embeddings=True)
+
+    def encode_openai(self, texts_to_embed):
+        import math
+        import time
+
+        tokens_count = 0
+        for text in texts_to_embed:
+            tokens_count += len(self.get_tokens(text))
+
+        chunks_num = math.ceil(tokens_count / 500000)
+        chunk_size = math.ceil(len(texts_to_embed) / chunks_num)
+
+        embeddings = []
+        for i in range(chunks_num):
+            start = i * chunk_size
+            end = start + chunk_size
+            chunk = texts_to_embed[start:end]
+
+            embeddings_tmp = self.openai_client.embeddings.create(
+                model=self.model_name,
+                input=chunk,
+            ).data
+
+            if embeddings_tmp is None:
+                break
+
+            embeddings.extend(embeddings_tmp)
+
+            if i < chunks_num - 1:
+                time.sleep(60)  # Wait 1 minute before the next call
+
+        return torch.stack([torch.tensor(embedding.embedding, dtype=torch.float32) for embedding in embeddings])
+
+    def get_tokens(self, text):
+        if self.model:
+            return self.model.tokenizer.tokenize(text)
+
+        tokens = []
+        for token in re.split(r'(\W|\b)', text):
+            if token.strip():
+                tokens.append(token)
+
+        return tokens
+
+
+class SplitDocs:
+    def split_in_topics(self,
+                        filedir: LiteralString = None,
+                        *,
+                        pattern_filename=r'(?<!navigation)\.(md|rst)',
+                        pattern_content_sub=r'---\nhide:[\s\S]+?---\s*',
+                        patterns_titles=(
+                            r'^# (.+)', r'^## (.+)', r'^### (.+)'),
+                        ) -> List[Tuple[str, str]]:
+        def matches_pattern(filename):
+            return re.search(pattern_filename, filename) is not None
+
+        def split_patterns_recursive(patterns, text, index=-1):
+            sections = re.split(patterns[0], text, flags=re.MULTILINE)
+            for i, section in enumerate(sections):
+                if not section.strip():
+                    continue
+                is_match = bool(i & 1)
+                if is_match:
+                    yield (index, section)
+                elif len(patterns) > 1:
+                    for j, section_j in split_patterns_recursive(patterns[1:], section, index + 1):
+                        yield (j, section_j)
+                else:
+                    yield (-1, section)
+
+        for root, _, files in os.walk(filedir):
+            for name in files:
+                if not matches_pattern(name):
+                    continue
+
+                full_path = os.path.join(root, name)
+                with open(full_path, 'r', encoding='utf-8') as file:
+                    content = file.read()
+
+                if pattern_content_sub:
+                    content = re.sub(pattern_content_sub, '', content)
+
+                rel_path = full_path.replace(filedir, '').replace('\\', '/')
+
+                # Protect code parts
+                patterns = (r'(```[\s\S]+?```)', *patterns_titles)
+
+                last_titles = []
+                last_titles_index = []
+                content_accum = ''
+                for i, section in split_patterns_recursive(patterns, content):
+                    if i < 0:
+                        content_accum += section
+                        continue
+                    if content_accum:
+                        yield rel_path, last_titles, content_accum
+                        content_accum = ''
+                    if not last_titles_index or i > last_titles_index[-1]:
+                        last_titles_index.append(i)
+                        last_titles.append(section)
+                        continue
+                    while len(last_titles_index) > 1 and i < last_titles_index[-1]:
+                        last_titles_index.pop()
+                        last_titles.pop()
+                    # Replace
+                    last_titles_index[-1] = i
+                    last_titles[-1] = section
+                if content_accum or i != -1:
+                    yield rel_path, last_titles, content_accum
+
+    def reduce_text(_self, text):
+        text = re.sub(r'^\n+', '', text)  # Strip
+        text = re.sub(r'<.*?>', '', text)  # Remove HTML tags
+        text = re.sub(r':\S*: ', '', text)  # Remove [:...:] patterns
+        text = re.sub(r'\s*\n+', '\n', text)
+        return text
+
+    def embedding_header(_self, rel_path, titles):
+        return f"{rel_path}\n# {' | '.join(titles)}\n\n"
+
+    def split_for_embedding(self,
+                            filedir: LiteralString = None,
+                            *,
+                            pattern_filename=r'(?<!navigation)\.(md|rst)',
+                            pattern_content_sub=r'---\nhide:[\s\S]+?---\s*',
+                            patterns_titles=(
+                                r'^# (.+)', r'^## (.+)', r'^### (.+)'),
+                            ):
+        tokenizer = EMBEDDING_CTX.model.tokenizer
+        max_tokens = EMBEDDING_CTX.model.max_seq_length
+        texts = []
+
+        for rel_path, titles, content in self.split_in_topics(
+                filedir, pattern_filename=pattern_filename, pattern_content_sub=pattern_content_sub, patterns_titles=patterns_titles):
+            header = self.embedding_header(rel_path, titles)
+            tokens_pre_len = len(tokenizer.tokenize(header))
+            tokens_so_far = tokens_pre_len
+            text_so_far = header
+            for part in self.reduce_text(content).splitlines():
+                part += '\n'
+                part_tokens_len = len(tokenizer.tokenize(part))
+                if tokens_so_far + part_tokens_len > max_tokens:
+                    texts.append(text_so_far)
+                    text_so_far = header
+                    tokens_so_far = tokens_pre_len
+                text_so_far += part
+                tokens_so_far += part_tokens_len
+
+            if tokens_so_far != tokens_pre_len:
+                texts.append(text_so_far)
+
+        return texts
+
+
+EMBEDDING_CTX = EmbeddingContext()
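Note: `encode_sbert` normalizes the embeddings, so the dot-product scoring used elsewhere in this commit behaves like cosine similarity. A minimal sketch of how `EMBEDDING_CTX` is typically consumed, assuming the package is importable as `routers.rag` and the model download succeeds; the corpus strings are illustrative:

```python
from sentence_transformers import util
from routers.rag import EMBEDDING_CTX  # import path assumed from this repo layout

corpus = ["Snapping in the 3D Viewport", "Building Blender on Windows"]
corpus_emb = EMBEDDING_CTX.encode(corpus)
query_emb = EMBEDDING_CTX.encode(["How do I snap objects?"])

# Normalized embeddings make dot_score equivalent to cosine similarity
hits = util.semantic_search(query_emb, corpus_emb, top_k=1,
                            score_function=util.dot_score)
print(corpus[hits[0][0]['corpus_id']], hits[0][0]['score'])
```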
routers/rag/embeddings_dev_docs.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efb0a98ccf275d0a1b3b24e9ec29c741bacbb452eb825ee2ec7f1697c258b0c8
+size 17001110
routers/rag/embeddings_issues.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fccb39b103030b69f46d734c9d29e8c4c6ca66d8cc6b7ffb995df8429d45a6c5
+size 748953800
routers/rag/embeddings_manual.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d8129bc22629ff95b72518f11c724043611e81ed78ad6c7f2e090973f79f71a
+size 23067144
routers/tool_bpy_doc.py
ADDED
@@ -0,0 +1,65 @@
+# bpydoc.py
+
+import pickle
+from fastapi import APIRouter
+from fastapi.responses import PlainTextResponse
+
+
+router = APIRouter()
+
+with open("routers/tool_bpy_doc_v4_1.pkl", 'rb') as file:
+    bpy_doc_map = pickle.load(file)
+    bpy_doc_map['__info'] = {'bases': None}
+
+
+def bpy_doc_get_documentation(api):
+    parts = api.split('.')
+    api = ""
+    data = bpy_doc_map
+    ctx = []
+    for part in parts:
+        try:
+            data = data[part]
+            api += part
+            ctx.append((api, data["__info"]['descr']))
+            api += '.'
+        except Exception as ex:
+            descr = f"{type(ex).__name__}: {ex}. Perhaps this object was implemented in a later version."
+            ctx.append((api, descr))
+            break
+
+    documentation = ""
+    for obj, descr in ctx:
+        documentation += f"{obj}:\n"
+        documentation += f"{descr}\n\n"
+
+    if len(data) > 1:
+        documentation += f"Members of {api}:\n"
+        info = data["__info"]
+        if info['bases']:
+            documentation += f"Inherits from {info['bases']}\n"
+
+        for key, val in data.items():
+            if key != "__info":
+                descr = ""
+                if isinstance(val, list):
+                    val = val[0]
+                    descr = "`bpy_prop_collection` of {}".format(
+                        val['__info']["descr"].replace('\n', ' '))
+                else:
+                    descr = val['__info']["descr"].replace('\n', ' ')
+
+                documentation += f"- {key}: {descr}\n"
+
+    return documentation
+
+
+@router.get("/bpy_doc", response_class=PlainTextResponse)
+def bpy_doc(api: str = ""):
+    message = bpy_doc_get_documentation(api)
+    return message
+
+
+if __name__ == "__main__":
+    test = bpy_doc("bpy.context")
+    print(test)
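Note: `bpy_doc_get_documentation` walks a nested dict in which every node carries an `__info` entry and list values stand for `bpy_prop_collection` members. A sketch of the shape the pickle is expected to have; the contents below are illustrative only, the real map comes from utils/generate_bpy_doc.py:

```python
# Illustrative shape only, not the real generated data
bpy_doc_map_sketch = {
    "__info": {"bases": None},
    "bpy": {
        "__info": {"descr": "Blender Python API.", "bases": None},
        "context": {
            "__info": {"descr": "Current window manager and data context.", "bases": None},
        },
    },
}
```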
routers/tool_bpy_doc_v4_1.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd35f7e45cbd214cf92f14f53dccfe209deebc4d1f444061f8089dc4440d483c
+size 1876094
routers/tool_calls.py
ADDED
@@ -0,0 +1,116 @@
+import json
+import logging
+from fastapi import APIRouter, Body
+from typing import List, Dict
+from pydantic import BaseModel
+
+try:
+    from tool_gpu_checker import gpu_checker_get_message
+    from tool_bpy_doc import bpy_doc_get_documentation
+    from tool_find_related import find_related
+    from tool_wiki_search import wiki_search
+except:
+    from routers.tool_gpu_checker import gpu_checker_get_message
+    from routers.tool_bpy_doc import bpy_doc_get_documentation
+    from routers.tool_find_related import find_related
+    from routers.tool_wiki_search import wiki_search
+
+
+class ToolCallFunction(BaseModel):
+    name: str
+    arguments: str
+
+
+class ToolCallInput(BaseModel):
+    id: str
+    type: str
+    function: ToolCallFunction
+
+
+router = APIRouter()
+
+
+def process_tool_call(tool_call: ToolCallInput) -> Dict:
+    output = {"tool_call_id": tool_call.id, "output": ""}
+    function_name = tool_call.function.name
+
+    try:
+        function_args = json.loads(tool_call.function.arguments)
+        if function_name == "get_bpy_api_info":
+            output["output"] = bpy_doc_get_documentation(
+                function_args.get("api", ""))
+        elif function_name == "check_gpu":
+            output["output"] = gpu_checker_get_message(
+                function_args.get("gpu", ""))
+        elif function_name == "find_related":
+            output["output"] = find_related(
+                function_args["repo"], function_args["number"])
+        elif function_name == "wiki_search":
+            output["output"] = wiki_search(**function_args)
+    except json.JSONDecodeError as e:
+        error_message = f"Malformed JSON encountered at position {e.pos}: {e.msg}\n {tool_call.function.arguments}"
+        output["output"] = error_message
+
+        # Logging the error for further investigation
+        logging.error(f"JSONDecodeError in process_tool_call: {error_message}")
+
+    return output
+
+
+@router.post("/function_call", response_model=List[Dict])
+def function_call(tool_calls: List[ToolCallInput] = Body(..., description="List of tool calls in the request body")):
+    """
+    Endpoint to process tool calls.
+    Args:
+        tool_calls (List[ToolCallInput]): List of tool calls.
+    Returns:
+        List[Dict]: List of tool outputs with tool_call_id and output.
+    """
+    tool_outputs = [process_tool_call(tool_input) for tool_input in tool_calls]
+    return tool_outputs
+
+
+if __name__ == "__main__":
+    tool_calls_data = [
+        {
+            "id": "call_abc123",
+            "type": "function",
+            "function": {
+                "name": "get_bpy_api_info",
+                "arguments": "{\"api\":\"bpy.context.scene.world\"}"
+            }
+        },
+        {
+            "id": "call_abc456",
+            "type": "function",
+            "function": {
+                "name": "check_gpu",
+                "arguments": "{\"gpu\":\"Mesa Intel(R) Iris(R) Plus Graphics 640 (Kaby Lake GT3e) (KBL GT3) Intel 4.6 (Core Profile) Mesa 22.2.5\"}"
+            }
+        },
+        {
+            "id": "call_abc789",
+            "type": "function",
+            "function": {
+                "name": "find_related",
+                "arguments": "{\"repo\":\"blender\",\"number\":111434}"
+            }
+        },
+        {
+            "id": "call_abc101112",
+            "type": "function",
+            "function": {
+                "name": "wiki_search",
+                "arguments": "{\"query\":\"Set Snap Base\",\"groups\":[\"manual\"]}"
+            }
+        }
+    ]
+
+    tool_calls = [
+        ToolCallInput(id=tc['id'], type=tc['type'],
+                      function=ToolCallFunction(**tc['function']))
+        for tc in tool_calls_data
+    ]
+
+    test = function_call(tool_calls)
+    print(test)
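Note: besides the `__main__` block above, the `/function_call` route can be exercised in-process through FastAPI's test client. A minimal sketch; the payload mirrors the first entry of `tool_calls_data`, and `httpx` must be installed for TestClient:

```python
from fastapi.testclient import TestClient
from main import app  # the FastAPI app assembled in main.py

client = TestClient(app)
payload = [{
    "id": "call_abc123",
    "type": "function",
    "function": {"name": "get_bpy_api_info",
                 "arguments": "{\"api\":\"bpy.context\"}"}
}]
response = client.post("/api/v1/function_call", json=payload)
print(response.json())  # -> [{"tool_call_id": "call_abc123", "output": "..."}]
```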
routers/tool_find_related.py
ADDED
@@ -0,0 +1,296 @@
+# routers/find_related.py
+
+import os
+import pickle
+import torch
+import re
+
+from typing import List
+from datetime import datetime, timedelta
+from enum import Enum
+from sentence_transformers import util
+from fastapi import APIRouter
+from fastapi.responses import PlainTextResponse
+
+try:
+    from .rag import EMBEDDING_CTX
+    from .utils_gitea import gitea_fetch_issues, gitea_json_issue_get, gitea_issues_body_updated_at_get
+except:
+    from rag import EMBEDDING_CTX
+    from utils_gitea import gitea_fetch_issues, gitea_json_issue_get, gitea_issues_body_updated_at_get
+
+
+router = APIRouter()
+
+issue_attr_filter = {'number', 'title', 'body',
+                     'state', 'updated_at', 'created_at'}
+
+
+class State(str, Enum):
+    opened = "opened"
+    closed = "closed"
+    all = "all"
+
+
+class _Data(dict):
+    cache_path = "routers/rag/embeddings_issues.pkl"
+
+    @staticmethod
+    def _create_issue_string(title, body):
+        cleaned_body = body.replace('\r', '')
+        cleaned_body = cleaned_body.replace('**System Information**\n', '')
+        cleaned_body = cleaned_body.replace('**Blender Version**\n', '')
+        cleaned_body = cleaned_body.replace(
+            'Worked: (newest version of Blender that worked as expected)\n', '')
+        cleaned_body = cleaned_body.replace(
+            '**Short description of error**\n', '')
+        cleaned_body = cleaned_body.replace('**Addon Information**\n', '')
+        cleaned_body = cleaned_body.replace(
+            '**Exact steps for others to reproduce the error**\n', '')
+        cleaned_body = cleaned_body.replace(
+            '[Please describe the exact steps needed to reproduce the issue]\n', '')
+        cleaned_body = cleaned_body.replace(
+            '[Please fill out a short description of the error here]\n', '')
+        cleaned_body = cleaned_body.replace(
+            '[Based on the default startup or an attached .blend file (as simple as possible)]\n', '')
+        cleaned_body = re.sub(
+            r', branch: .+?, commit date: \d{4}-\d{2}-\d{2} \d{2}:\d{2}, hash: `.+?`', '', cleaned_body)
+        cleaned_body = re.sub(
+            r'\/?attachments\/[a-zA-Z0-9\-]+', 'attachment', cleaned_body)
+        cleaned_body = re.sub(
+            r'https?:\/\/[^\s/]+(?:\/[^\s/]+)*\/([^\s/]+)', lambda match: match.group(1), cleaned_body)
+
+        return title + '\n' + cleaned_body
+
+    @staticmethod
+    def _find_latest_date(issues, default_str=None):
+        # Handle the case where 'issues' is empty
+        if not issues:
+            return default_str
+
+        return max((issue['updated_at'] for issue in issues), default=default_str)
+
+    @classmethod
+    def _create_strings_to_embbed(cls, issues):
+        texts_to_embed = [cls._create_issue_string(
+            issue['title'], issue['body']) for issue in issues]
+
+        return texts_to_embed
+
+    def _data_ensure_size(self, repo, size_new):
+        ARRAY_CHUNK_SIZE = 4096
+
+        updated_at_old = None
+        arrays_size_old = 0
+        titles_old = []
+        try:
+            arrays_size_old = self[repo]['arrays_size']
+            if size_new <= arrays_size_old:
+                return
+            updated_at_old = self[repo]['updated_at']
+            titles_old = self[repo]['titles']
+        except:
+            pass
+
+        arrays_size_new = ARRAY_CHUNK_SIZE * \
+            (int(size_new / ARRAY_CHUNK_SIZE) + 1)
+
+        data_new = {
+            'updated_at': updated_at_old,
+            'arrays_size': arrays_size_new,
+            'titles': titles_old + [None] * (arrays_size_new - arrays_size_old),
+            'embeddings': torch.empty((arrays_size_new, *EMBEDDING_CTX.embedding_shape),
+                                      dtype=EMBEDDING_CTX.embedding_dtype,
+                                      device=EMBEDDING_CTX.embedding_device),
+            'opened': torch.zeros(arrays_size_new, dtype=torch.bool),
+            'closed': torch.zeros(arrays_size_new, dtype=torch.bool),
+        }
+
+        try:
+            data_new['embeddings'][:arrays_size_old] = self[repo]['embeddings']
+            data_new['opened'][:arrays_size_old] = self[repo]['opened']
+            data_new['closed'][:arrays_size_old] = self[repo]['closed']
+        except:
+            pass
+
+        self[repo] = data_new
+
+    def _embeddings_generate(self, repo):
+        if os.path.exists(self.cache_path):
+            with open(self.cache_path, 'rb') as file:
+                data = pickle.load(file)
+                self.update(data)
+            if repo in self:
+                return
+
+        issues = gitea_fetch_issues('blender', repo, state='all', since=None,
+                                    issue_attr_filter=issue_attr_filter)
+
+        # issues = sorted(issues, key=lambda issue: int(issue['number']))
+
+        print("Embedding Issues...")
+        texts_to_embed = self._create_strings_to_embbed(issues)
+        embeddings = EMBEDDING_CTX.encode(texts_to_embed)
+
+        self._data_ensure_size(repo, int(issues[0]['number']))
+        self[repo]['updated_at'] = self._find_latest_date(issues)
+
+        titles = self[repo]['titles']
+        embeddings_new = self[repo]['embeddings']
+        opened = self[repo]['opened']
+        closed = self[repo]['closed']
+
+        for i, issue in enumerate(issues):
+            number = int(issue['number'])
+            titles[number] = issue['title']
+            embeddings_new[number] = embeddings[i]
+            if issue['state'] == 'open':
+                opened[number] = True
+            if issue['state'] == 'closed':
+                closed[number] = True
+
+    def _embeddings_updated_get(self, repo):
+        with EMBEDDING_CTX.lock:
+            if not repo in self:
+                self._embeddings_generate(repo)
+
+            date_old = self[repo]['updated_at']
+
+            issues = gitea_fetch_issues(
+                'blender', repo, since=date_old, issue_attr_filter=issue_attr_filter)
+
+            # Get the most recent date
+            date_new = self._find_latest_date(issues, date_old)
+
+            if date_new == date_old:
+                # Nothing changed
+                return self[repo]
+
+            self[repo]['updated_at'] = date_new
+
+            # autopep8: off
+            # Consider that if the time hasn't changed, it's the same issue.
+            issues = [issue for issue in issues if issue['updated_at'] != date_old]
+
+            self._data_ensure_size(repo, int(issues[0]['number']))
+
+            updated_at = gitea_issues_body_updated_at_get(issues)
+            issues_to_embed = []
+
+            for i, issue in enumerate(issues):
+                number = int(issue['number'])
+                self[repo]['opened'][number] = issue['state'] == 'open'
+                self[repo]['closed'][number] = issue['state'] == 'closed'
+
+                title_old = self[repo]['titles'][number]
+                if title_old != issue['title']:
+                    self[repo]['titles'][number] = issue['title']
+                    issues_to_embed.append(issue)
+                elif not updated_at or updated_at[i] >= date_old:
+                    issues_to_embed.append(issue)
+
+            if issues_to_embed:
+                print(f"Embedding {len(issues_to_embed)} issue{'s' if len(issues_to_embed) > 1 else ''}")
+                texts_to_embed = self._create_strings_to_embbed(issues_to_embed)
+                embeddings = EMBEDDING_CTX.encode(texts_to_embed)
+
+                for i, issue in enumerate(issues_to_embed):
+                    number = int(issue['number'])
+                    self[repo]['embeddings'][number] = embeddings[i]
+
+            # autopep8: on
+            return self[repo]
+
+    def _sort_similarity(self,
+                         repo: str,
+                         query_emb: List[torch.Tensor],
+                         limit: int,
+                         state: State = State.opened) -> list:
+        duplicates = []
+
+        data = self[repo]
+        embeddings = data['embeddings']
+        mask_opened = data["opened"]
+
+        if state == State.all:
+            mask = mask_opened | data["closed"]
+        else:
+            mask = data[state.value]
+
+        embeddings = embeddings[mask]
+        true_indices = mask.nonzero(as_tuple=True)[0]
+
+        ret = util.semantic_search(
+            query_emb, embeddings, top_k=limit, score_function=util.dot_score)
+
+        for score in ret[0]:
+            corpus_id = score['corpus_id']
+            number = true_indices[corpus_id].item()
+            closed_char = "" if mask_opened[number] else "~~"
+            text = f"{closed_char}#{number}{closed_char}: {data['titles'][number]}"
+            duplicates.append(text)
+
+        return duplicates
+
+    def find_relatedness(self, repo: str, number: int, limit: int = 20, state: State = State.opened):
+        data = self._embeddings_updated_get(repo)
+
+        # Check if the embedding already exists.
+        if data['titles'][number] is not None:
+            new_embedding = data['embeddings'][number]
+        else:
+            gitea_issue = gitea_json_issue_get('blender', repo, number)
+            text_to_embed = self._create_issue_string(
+                gitea_issue['title'], gitea_issue['body'])
+
+            new_embedding = EMBEDDING_CTX.encode([text_to_embed])
+
+        duplicates = self._sort_similarity(
+            repo, new_embedding, limit=limit, state=state)
+
+        if not duplicates:
+            return ''
+
+        if match := re.search(r'(~~)?#(\d+)(~~)?:', duplicates[0]):
+            number_cached = int(match.group(2))
+            if number_cached == number:
+                return '\n'.join(duplicates[1:])
+
+        return '\n'.join(duplicates)
+
+
+G_data = _Data()
+
+
+@router.get("/find_related/{repo}/{number}", response_class=PlainTextResponse)
+def find_related(repo: str = 'blender', number: int = 104399, limit: int = 15, state: State = State.opened) -> str:
+    related = G_data.find_relatedness(repo, number, limit=limit, state=state)
+    return related
+
+
+if __name__ == "__main__":
+    update_cache = True
+    if update_cache:
+        G_data._embeddings_updated_get('blender')
+        G_data._embeddings_updated_get('blender-addons')
+        with open(G_data.cache_path, "wb") as file:
+            # Converting the embeddings to be CPU compatible, as the virtual machine in use currently only supports the CPU.
+            for val in G_data.values():
+                val['embeddings'] = val['embeddings'].to(torch.device('cpu'))
+
+            pickle.dump(dict(G_data), file, protocol=pickle.HIGHEST_PROTOCOL)
+
+        # Converting the embeddings to be GPU.
+        for val in G_data.values():
+            val['embeddings'] = val['embeddings'].to(torch.device('cuda'))
+
+    # 'blender/blender/111434' must print #96153, #83604 and #79762
+    related1 = G_data.find_relatedness(
+        'blender', 111434, limit=20, state=State.all)
+    related2 = G_data.find_relatedness('blender-addons', 104399, limit=20)
+
+    print("These are the 20 most related issues:")
+    print(related1)
+    print()
+    print("These are the 20 most related issues:")
+    print(related2)
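Note: `_sort_similarity` filters issues by state with a boolean mask and then uses `nonzero` to map the compacted corpus ids returned by `semantic_search` back to issue numbers. A self-contained sketch of that index mapping with made-up data:

```python
import torch

embeddings = torch.randn(6, 4)  # row index == issue number
mask = torch.tensor([False, True, False, True, True, False])  # e.g. "opened"

filtered = embeddings[mask]                    # compacted rows 0..2
true_indices = mask.nonzero(as_tuple=True)[0]  # tensor([1, 3, 4])

# A corpus_id from semantic_search indexes `filtered`;
# true_indices maps it back to the original issue number.
corpus_id = 2
print(true_indices[corpus_id].item())  # -> 4
```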
routers/tool_gpu_checker.py
ADDED
@@ -0,0 +1,213 @@
+# gpuchecker.py
+
+import re
+from fastapi import APIRouter
+from fastapi.responses import PlainTextResponse
+
+
+router = APIRouter()
+
+
+def _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info):
+    for model_pattern, descr in supported_models.items():
+        if match := re.search(model_pattern, graphics_card_info, re.I):
+            return True, match.group(), descr.format(*match.groups())
+
+    for model_pattern, descr in unsupported_models.items():
+        if match := re.search(model_pattern, graphics_card_info, re.I):
+            return False, match.group(), descr.format(*match.groups())
+
+    return False, None, None
+
+
+def _check_amd(graphics_card_info):
+    supported_models = {
+        r"Radeon\s*6\d{2}([A-Z])?\b": "this model belongs to the RDNA 2 architecture",
+        r"(Radeon\s*)?R9\s*[A-Z0-9]+": "R9 models belong to the GCN 1st gen or newer architecture",
+        r"(Radeon\s*)?(Pro\s*)?\bW7\d{3}(X)?\b(\s*Duo)?": "Radeon Pro W7000 models belong to the RDNA 3 architecture",
+        r"(Radeon\s*)?(Pro\s*)?\bW([5-6])\d{3}(X)?\b(\s*Duo)?": "Radeon Pro W{2}000 models belong to the RDNA 2 architecture",
+        r"(AMD\s*)?6800 XT": "this model belongs to the RDNA 2 architecture",
+        r"Radeon\s*(\(TM\)\s*)?RX Vega(\s*\d{2}\b)": "Radeon RX Vega models belong to the GCN 5th gen architecture",
+        r"Radeon Pro Vega ((\d{2}(X)?|II)\b)?(\s*Duo)?": "Radeon Pro Vega models belong to the GCN 5th gen architecture",
+        r"Radeon\s*(\(TM\)\s*)?Pro [4-5]\d{2}(X)?": "Radeon Pro 400/500 series belongs to the GCN 4th gen architecture",
+        r"Radeon VII": "RX models belong to the GCN 5 architecture",
+        r"Radeon Graphics \(renoir": "this model belongs to the GCN 5th gen architecture (Vega)",
+        r"Radeon\s*(\(TM\)\s*)?Vega 8 (Graphics )?\(raven[^)]+\)": "this model belongs to the GCN 5th gen architecture (Vega)",
+        r"Radeon\s*(\(TM\)\s*)?(Pro\s*)?WX\s*(5\d{3}\b)": "this model belongs to the GCN 4th gen architecture",
+        r"FirePro": "FirePro models belong to the GCN 1st gen or newer architecture",
+        r"HD\s*(7[7-9]\d{2})": "HD {0} model belongs to the GCN 1st gen or newer architecture",
+        r"(Radeon\s*)?RX\s*([5-7]\d{3})(M|X)?(\s*(XT|Series|S|XTX))?\b": "RX models belong to the GCN 1st gen or newer architecture",
+        r"(Radeon\s*)?(RX\s*)?6(3|4)0\b": "it has Polaris 23 chip that belongs to GCN 4th gen architecture",
+        r"(Radeon\s*)?62(0|5)\b": "it has Polaris 24 chip that belongs to GCN 3rd gen architecture",
+        r"(Radeon\s*)?610\b": "it has Banks chip that belongs to GCN 1st gen architecture",
+        r"(Radeon\s*)?RX\s*580(X?)\b": "it has Polaris 20 XT chip that belongs to GCN 4th gen architecture",
+        r"(Radeon\s*)?RX\s*570\b": "it has Ellesmere Pro chip that belongs to GCN 4th gen architecture",
+        r"(Radeon\s*)?RX\s*560X\b": "it has Polaris 31 XL chip that belongs to GCN 4th gen architecture",
+        r"(Radeon\s*)?RX\s*560\b": "it has Baffin XT chip that belongs to GCN 4th gen architecture",
+        r"(Radeon\s*)?5(40X|50X)\b": "it has Polaris 23 XT chip that belongs to GCN 4th gen architecture",
+        r"(Radeon\s*)?RX\s*5(40|50)\b": "it has Lexa Pro chip that belongs to GCN 4th gen architecture",
+        r"(Radeon\s*)?RX\s*480\b": "it has Arctic Islands chip that belongs to GCN 4th gen architecture",
+        r"(Radeon\s*)?(\(TM\)\s*)?RX\s*4[6-8]0(\b|D)": "it has Ellesmere chip that belongs to GCN 4th gen architecture",
+        r"(Radeon\s*)?5(30X|35)\b": "it has Polaris 24 XT chip that belongs to GCN 3rd gen architecture",
+        r"(Radeon\s*)?530\b": "it has Weston chip that belongs to GCN 3rd gen architecture",
+        r"(Radeon\s*)?520\b": "it has Banks chip that belongs to GCN 1st gen architecture",
+        r"(Radeon\s*)?(\(TM\)\s*)?R4": "Radeon R4 models belong to the GCN 1st gen or newer architecture",
+        r"(Radeon\s*)?(\(TM\)\s*)?R5 (M)?335": "Radeon R5 M335 belongs to the GCN 1st gen architecture",
+        r"(Radeon\s*)?(\(TM\)\s*)?R7 (M)?2\d{2}(E|X)?\b": "Radeon R7 200 models belong to GCN 1st or 2nd gen architecture",
+        r"(Radeon\s*)?(\(TM\)\s*)?R5 (M)?24\d(E|X)?\b": "Radeon R5 240 models belong to GCN 1st gen architecture",
+        # r"Radeon\s*(\(TM\)\s*)?(Pro\s*)?Vega (Pro\s*)?": "this model belongs to the GCN 4th gen architecture",
+        # Add more model-to-architecture mappings as needed
+    }
+
+    unsupported_models = {
+        r"HD ([5-6])\d{3}": "HD {0}XXX models have TeraScale architecture that is older than GCN 1st gen",
+        r"HD\s*(7[3-6]\d{2})": "HD {0} model has TeraScale 2 architecture that is older than GCN 1st gen",
+        r"Radeon R5 (M)?2(2|3)\d(X)?\b": "Radeon R5 220/230 models belong to Terascale 2 architecture that is older than GCN 1st gen",
+        r"(AMD\s*ATI\s*)?Radeon\s*680M": "AMD ATI Radeon 680M has TeraScale architecture that is older than GCN 1st gen",
+        # Add more model-to-architecture mappings as needed
+    }
+
+    return _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info)
+
+
+def _check_nvidia(graphics_card_info):
+    supported_models = {
+        r"(GeForce )?(RTX\s*)?(?<!\d)([2-4])0[5-9]\d(\s*(RTX|Ti))?\b": "RTX {2}0 series are newer than GTX 400",
+        r"(GeForce )?(GTX\s*)?(?<!\d)16[5-9]\d(\s*(GTX|Ti))?\b": "GTX 16 series are newer than GTX 400",
+        r"(GeForce )?(GTX\s*)?(?<!\d)10[5-9]\d(\s*(GTX|Ti))?\b": "GTX 10 series are newer than GTX 400",
+        r"(GTX )?TITAN": "GTX TITAN models are newer than GTX 400",
+        r"(RTX )?\bA(\d+)": "RTX A models are newer than GTX 400",
+        r"Quadro FX \d+": "Quadro FX series uses a Quadro-based architecture",
+        r"Quadro RTX \d+": "Quadro RTX series uses a Quadro-based architecture",
+        r"Quadro (K|M|P|GP|GV)?\d+(M)?": "it uses a Quadro-based architecture",
+        r"NVS 8\d{2}(s)?\b": "it uses a Maxwell based architecture",
+        r"(Quadro )?NVS 110M\b": "it uses a Maxwell based architecture",
+        r"(GeForce )?GT 730\b": "GeForce from 700 series are newer than GTX 400. It also has 2 or 4 GB",
+        r"(GeForce )?GTX ([4-9])\d{2}(\s*(GTX|Ti))?\b": "GPUs from GTX {1}00 series are newer than GTX 400",
+        r"(GeForce )?\bMX\d{3}\b": "MX models are newer than GTX 400",
+        r"Tesla (.+)": "it has a Tesla architecture",
+        # Add more model-to-architecture mappings as needed
+    }
+
+    unsupported_models = {
+        r"(GeForce )(GTX )?3\d{2}": "GTX 3XX models are older than GeForce 400",
+        r"(Quadro )?NVS 50\b": "although Quadro, it only supports OpenGL 1.3 and is older than 10 years",
+        r"(Quadro )?NVS \d{3}(s)?\b": "it is older than 10 years",
+        r"(Quadro )?NVS 1[1-2]0M\b": "it is Curie-based and older than 10 years",
+        r"(Quadro )?NVS 1\d{2}M\b": "although it is Tesla-based it is older than 10 years",
+        r"(Quadro )?NVS 4200M\b": "although it has Fermi architecture (newer than Tesla) it is older than 10 years",
+        # Add unsupported model-to-architecture mappings if needed
+    }
+
+    return _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info)
+
+
+def _check_intel(graphics_card_info):
+    supported_models = {
+        r"HD (Graphics )?5\d{3}\b": "it has the Broadwell architecture and is less than 10 years old (from 2015)",
+        r"(Mesa\s*)?(Iris\s*)?Xe Graphics": "Tiger Lake is newer than Broadwell architecture",
+        r"Iris Plus Graphics G7": "Ice Lake is newer than Broadwell architecture",
+        r"UHD\s*(Graphics )?6[3-5]\d\b": "Coffee Lake or Comet Lake are newer than Broadwell architecture",
+        r"UHD\s*(Graphics )?62\d\b": "Kaby Lake is newer than Broadwell architecture",
+        r"HD\s*(Graphics )?(P)?6[1-3]\d\b": "Kaby Lake is newer than Broadwell architecture",
+        r"UHD\s*(Graphics )?60\d": "Gemini Lake is newer than Broadwell architecture",
+        r"UHD Graphics": "Kaby Lake, Coffee Lake or Comet Lake are newer than Broadwell architecture",
+        r"Iris": "Coffee Lake is newer than Broadwell architecture",
+        r"HD (Graphics )?5\d{2}\b": "Skylake is newer than Broadwell architecture",
+        r"Iris (Graphics )?6\d{3}\b": "it has the Broadwell architecture",
+        r"Intel(\(R\))? (Arc(\(TM\))?\s*)?(A)?7\d{2}\b": "the A770 model is based on the Intel Arc architecture that is newer than Broadwell",
+        r"Intel\s*(Arc\s*)?(A)?7\d{2}\b": "the A770 model is based on the Intel Arc architecture that is newer than Broadwell",
+        # Add more model-to-architecture mappings as needed
+    }
+
+    unsupported_models = {
+        r"HD (Graphics )?4\d{3}\b": "it has the Haswell architecture that is older than Broadwell architecture. Also [Due to driver issues, support for Intel HD4000 series GPUs has been dropped](https://wiki.blender.org/wiki/Reference/Release_Notes/4.0#:~:text=Due%20to%20driver%20issues%2C%20support%20for%20Intel%20HD4000%20series%20GPUs%20has%20been%20dropped)",
+        r"HD Graphics 3\d{3}\b": "Sandy Bridge is older than Broadwell architecture"
+        # Add unsupported model-to-architecture mappings if needed
+    }
+
+    return _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info)
+
+
+def _check_apple(graphics_card_info):
+    supported_models = {
+        r"(Apple\s*)?(`)?\bM1(`)?(\s*Max)?": "it is one of the new ARM-based systems designed by Apple Inc",
+        r"(Apple\s*)?(`)?\bM2(`)?(\s*Max)?": "it is one of the new ARM-based systems designed by Apple Inc",
+        # Add more model-to-architecture mappings as needed
+    }
+
+    unsupported_models = {
+        # Add unsupported model-to-architecture mappings if needed
+    }
+
+    return _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info)
+
+
+def _check_apple_os_version(os_version, is_apple_silicon):
+    major, minor = map(int, os_version.split(".")[:2])
+    if is_apple_silicon:
+        if major >= 11:
+            return True
+    else:
+        if major >= 10 and minor >= 15:
+            return True
+    return False
+
+
+def gpu_checker_get_message(text):
+    is_supported = False
+    vendor = None
+    model = None
+    descr = None
+
+    if "nvidia" in text.lower() or "rtx" in text.lower() or "gtx" in text.lower() or "geforce" in text.lower():
+        vendor = 'NVIDIA'
+        is_supported, model, descr = _check_nvidia(text)
+
+    elif "amd " in text.lower() or "ati " in text.lower() or "radeon" in text.lower():
+        vendor = 'AMD'
+        is_supported, model, descr = _check_amd(text)
+
+    elif "intel" in text.lower():
+        vendor = 'Intel'
+        is_supported, model, descr = _check_intel(text)
+
+    elif "apple" in text.lower() or re.search(r'\bM1\b', text):
+        vendor = 'Apple'
+        is_supported, model, descr = _check_apple(text)
+
+    else:
+        for func in {_check_nvidia, _check_amd, _check_intel, _check_apple}:
+            is_supported, model, descr = func(text)
+            if model:
+                vendor = 'GPU'
+                break
+
+    if not vendor:
+        return "Could not find graphics card information"
+    elif not model:
+        return f"Could not determine the card model from {vendor}"
+
+    message = f"The {vendor} card {model} is {'supported' if is_supported else 'not supported'} as {descr}"
+    if not is_supported:
+        message += """
+
+This GPU is below the minimum requirements for Blender, so Blender no longer provides support for it. https://www.blender.org/download/requirements/
+Installing the latest graphics driver sometimes helps to make such GPUs work, see here for more information. https://docs.blender.org/manual/en/dev/troubleshooting/gpu/index.html
+If that doesn't help, you can use Blender 2.79: https://www.blender.org/download/previous-versions/
+"""
+
+    return message
+
+
+@router.get("/gpu_checker", response_class=PlainTextResponse)
+def gpu_checker(gpu_info: str = ""):
+    message = gpu_checker_get_message(gpu_info)
+
+    return message
+
+
+if __name__ == "__main__":
+    gpu_info = "AMD Radeon HD 7660D"
+    message = gpu_checker_get_message(gpu_info)
+    print(message)
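Note: each description string can reference regex capture groups positionally, which is why `_check_graphics_card_info` calls `descr.format(*match.groups())`. A small sketch of that mechanism in isolation, reusing one of the AMD patterns above:

```python
import re

pattern = r"HD\s*(7[7-9]\d{2})"
descr = "HD {0} model belongs to the GCN 1st gen or newer architecture"

if match := re.search(pattern, "AMD Radeon HD 7790", re.I):
    # match.groups() fills the {0}, {1}, ... placeholders in descr
    print(descr.format(*match.groups()))
```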
routers/tool_wiki_search.py
ADDED
@@ -0,0 +1,201 @@
+# routers/tool_wiki_search.py
+
+import base64
+import os
+import pickle
+import re
+import torch
+from enum import Enum
+from fastapi import APIRouter, Query, params
+from fastapi.responses import PlainTextResponse
+from heapq import nlargest
+from sentence_transformers import util
+from typing import Dict, List, Tuple, Set, LiteralString
+
+try:
+    from .rag import SplitDocs, EMBEDDING_CTX
+    from .utils_gitea import gitea_wiki_page_get, gitea_wiki_pages_get
+except:
+    from rag import SplitDocs, EMBEDDING_CTX
+    from utils_gitea import gitea_wiki_page_get, gitea_wiki_pages_get
+
+
+MANUAL_DIR = "D:/BlenderDev/blender-manual/manual"
+DOCS_DIR = "D:/BlenderDev/blender-developer-docs/docs"
+
+
+class Group(str, Enum):
+    dev_docs = "dev_docs"
+    # wiki = "wiki"
+    manual = "manual"
+
+
+GROUPS_DEFAULT = {Group.dev_docs, Group.manual}
+
+
+class _Data(dict):
+    cache_path = "routers/rag/embeddings_{}.pkl"
+
+    def __init__(self):
+        for grp in list(Group):
+            cache_path = self.cache_path.format(grp.name)
+            if os.path.exists(cache_path):
+                with open(cache_path, 'rb') as file:
+                    self[grp.name] = pickle.load(file)
+                continue
+
+            # Generate
+            print("Embedding Texts for", grp.name)
+            self[grp.name] = {}
+
+            # Create a list to store the text files
+            if grp is Group.dev_docs:
+                texts = self.docs_get_texts_to_embed()
+            # elif grp is Group.wiki:
+            #     texts = self.wiki_get_texts_to_embed()
+            else:
+                texts = self.manual_get_texts_to_embed()
+
+            self[grp]['texts'] = texts
+            self[grp]['embeddings'] = EMBEDDING_CTX.encode(texts)
+
+            with open(cache_path, "wb") as file:
+                # Converting the embeddings to be CPU compatible, as the virtual machine in use currently only supports the CPU.
+                self[grp]['embeddings'] = self[grp]['embeddings'].to(
+                    torch.device('cpu'))
+
+                pickle.dump(self[grp], file, protocol=pickle.HIGHEST_PROTOCOL)
+
+    @classmethod
+    def manual_get_texts_to_embed(cls):
+        class SplitManual(SplitDocs):
+            def reduce_text(_self, text):
+                # Remove repeated characters
+                text = re.sub(r'\^{3,}', '', text)
+                text = re.sub(r'-{3,}', '', text)
+
+                text = text.replace('.rst', '.html')
+                text = super().reduce_text(text)
+                return text
+
+            def embedding_header(self, rel_path, titles):
+                rel_path = rel_path.replace('.rst', '.html')
+                return super().embedding_header(rel_path, titles)
+
+        # Remove patterns ".. word::" and ":word:"
+        pattern_content_sub = r'\.\. [^\n]+\n+(?: {3,}[^\n]*\n)*|:\w+:'
+        patterns_titles = (
+            r'[\*#%]{3,}\n\s*(.+)\n[\*#%]{3,}', r'(?:[=+]{3,}\n)?\s*(.+)\n[=+]{3,}\n')
+
+        return SplitManual().split_for_embedding(
+            MANUAL_DIR,
+            pattern_content_sub=pattern_content_sub,
+            patterns_titles=patterns_titles,
+        )
+
+    @staticmethod
+    def wiki_get_texts_to_embed():
+        class SplitWiki(SplitDocs):
+            def split_in_topics(_self,
+                                filedir: LiteralString = None,
+                                *,
+                                pattern_filename=None,
+                                pattern_content_sub=None,
+                                patterns_titles=None):
+                owner = "blender"
+                repo = "blender"
+                pages = gitea_wiki_pages_get(owner, repo)
+                for page_name in pages:
+                    page_name_title = page_name["title"]
+                    page = gitea_wiki_page_get(owner, repo, page_name_title)
+                    rel_dir = f'/{owner}/{repo}/{page["sub_url"]}'
+                    titles = [page_name_title]
+                    text = base64.b64decode(
+                        page["content_base64"]).decode('utf-8')
+                    yield (rel_dir, titles, text)
+
+            def reduce_text(_self, text):
+                text = super().reduce_text(text)
+                text = text.replace('https://projects.blender.org', '')
+                return text
+
+        return SplitWiki().split_for_embedding()
+
+    @staticmethod
+    def docs_get_texts_to_embed():
+        class SplitBlenderDocs(SplitDocs):
+            def reduce_text(_self, text):
+                text = super().reduce_text(text)
+                # Remove .md or index.md
+                text = re.sub(r'(index)?.md', '', text)
+                return text
+
+            def embedding_header(_self, rel_path, titles):
+                rel_path = re.sub(r'(index)?.md', '', rel_path)
+                return super().embedding_header(rel_path, titles)
+
+        return SplitBlenderDocs().split_for_embedding(DOCS_DIR)
+
+    def _sort_similarity(
+            self,
+            text_to_search: str,
+            groups: Set[Group] = Query(
+                default={Group.dev_docs, Group.manual}),
+            limit: int = 5) -> List[str]:
+        base_url: Dict[Group, str] = {
+            Group.dev_docs: "https://developer.blender.org/docs",
+            # Group.wiki: "https://projects.blender.org",
+            Group.manual: "https://docs.blender.org/manual/en/dev"
+        }
+        query_emb = EMBEDDING_CTX.encode([text_to_search])
+        results: List[Tuple[float, str, Group]] = []
+        for grp in groups:
+            if grp not in self:
+                continue
+
+            search_results = util.semantic_search(
+                query_emb, self[grp]['embeddings'], top_k=limit, score_function=util.dot_score)
+
+            for score in search_results[0]:
+                corpus_id = score['corpus_id']
+                text = self[grp]['texts'][corpus_id]
+                results.append((score['score'], text, grp))
+
+        # Keep only the top `limit` results
+        top_results = nlargest(limit, results, key=lambda x: x[0])
+
+        # Extract sorted texts with base URL
+        sorted_texts = [base_url[grp] + text for _, text, grp in top_results]
+
+        return sorted_texts
+
+
+G_data = _Data()
+
+router = APIRouter()
+
+
+@router.get("/wiki_search", response_class=PlainTextResponse)
+def wiki_search(
+        query: str = "",
+        groups: Set[Group] = Query(default=GROUPS_DEFAULT)
+) -> str:
+    try:
+        groups = GROUPS_DEFAULT.intersection(groups)
+        if len(groups) == 0:
+            raise
+    except:
+        groups = GROUPS_DEFAULT
+
+    texts = G_data._sort_similarity(query, groups)
+    result: str = ''
+    for text in texts:
+        result += f'\n---\n{text}'
+    return result
+
+
+if __name__ == '__main__':
+    tests = ["Set Snap Base", "Building the Manual",
+             "Bisect Object", "Who are the Triagers", "4.3 Release Notes Motion Paths"]
+    result = wiki_search(tests[0])
+    print(result)
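Note: each group is searched independently with its own `top_k=limit`, and the per-group hits are then merged with `heapq.nlargest` so only the overall best `limit` results survive. A tiny sketch of that merge with made-up scores and paths:

```python
from heapq import nlargest

results = [(0.82, "/modeling/meshes/....html", "manual"),
           (0.91, "/handbook/building_blender/....md", "dev_docs"),
           (0.75, "/render/....html", "manual")]

# Keep the overall top 2 across groups, highest score first
top = nlargest(2, results, key=lambda x: x[0])
print([text for _, text, _ in top])
```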
routers/utils_gitea.py
ADDED
@@ -0,0 +1,126 @@
# utils_gitea.py

import json
import urllib.error
import urllib.parse
import urllib.request
from concurrent.futures import ThreadPoolExecutor, as_completed

BASE_API_URL = "https://projects.blender.org/api/v1"


def url_json_get(url, data=None):
    try:
        if data:
            data = json.dumps(data).encode('utf-8')
            request = urllib.request.Request(url, data=data, method='POST')
            request.add_header('Content-Type', 'application/json')
        else:
            request = urllib.request.Request(url)

        response = urllib.request.urlopen(request)
        response_data = json.loads(response.read())
        return response_data

    except urllib.error.URLError as ex:
        print("Error making HTTP request:", ex)
        return None


def url_json_get_all_pages(url, item_filter=None, limit=50, exclude=set(),
                           verbose=False):
    assert limit <= 50, "50 is the maximum limit of items per page"

    url_for_page = f"{url}&limit={limit}&page="

    # Fetch the first page synchronously to learn the total item count.
    with urllib.request.urlopen(url_for_page + '1') as response:
        headers_first = response.info()
        json_data_first = json.loads(response.read())

    total_count = int(headers_first.get('X-Total-Count'))
    total_pages = (total_count + limit - 1) // limit

    def fetch_page(page):
        if page == 1:
            json_data = json_data_first
        else:
            json_data = url_json_get(url_for_page + str(page))

        if verbose:
            print(f"Fetched page {page}")

        data = []
        for item in json_data:
            if exclude and int(item["number"]) in exclude:
                continue
            data.append({k: item[k] for k in item_filter}
                        if item_filter else item)

        return data

    # Fetch the remaining pages concurrently; `as_completed` yields
    # results in completion order, not page order.
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(fetch_page, page)
                   for page in range(1, total_pages + 1)]
        all_results = [future.result() for future in as_completed(futures)]

    return [item for sublist in all_results for item in sublist]

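The page count above uses the integer ceiling-division idiom `(n + d - 1) // d`, which equals `ceil(n / d)` for non-negative `n` and positive `d`. A quick sanity check of the identity:

import math

limit = 50
for total_count in (0, 1, 49, 50, 51, 100, 101):
    pages = (total_count + limit - 1) // limit
    assert pages == math.ceil(total_count / limit)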

def gitea_json_issue_get(owner, repo, number):
    """
    Get issue/pull JSON data.
    """
    url = f"{BASE_API_URL}/repos/{owner}/{repo}/issues/{number}"
    return url_json_get(url)


def gitea_fetch_issues(owner, repo, state='all', labels='',
                       issue_attr_filter=None, since=None, exclude=set()):
    query_params = {
        'labels': labels,
        'state': state,
        'type': 'issues'}

    if since:
        query_params['since'] = since

    base_url = f"{BASE_API_URL}/repos/{owner}/{repo}/issues"
    encoded_query_params = urllib.parse.urlencode(query_params)
    issues_url = f"{base_url}?{encoded_query_params}"
    return url_json_get_all_pages(issues_url, item_filter=issue_attr_filter,
                                  exclude=exclude, verbose=True)


def gitea_issues_body_updated_at_get(issues, verbose=True):
    def fetch_issue(issue):
        number = issue['number']
        if verbose:
            print(f"Fetched issue #{number}")

        json_data = url_json_get(
            f"https://projects.blender.org/blender/blender/issues/{number}/content-history/list")
        # Verify that the response contains the expected data before trying
        # to access it. The 'name' field holds an HTML snippet; extract the
        # value of its datetime attribute.
        if json_data and json_data['results']:
            return json_data['results'][0]['name'].split('datetime="')[1].split('"')[0]
        else:
            return issue['created_at']

    # Note: `as_completed` returns results in completion order, which may
    # not match the order of `issues`.
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(fetch_issue, issue) for issue in issues]
        all_results = [future.result() for future in as_completed(futures)]

    return all_results


def gitea_wiki_page_get(owner, repo, page_name, verbose=True):
    """
    Get a wiki page.
    """
    encoded_page_name = urllib.parse.quote(page_name, safe='')
    base_url = f"{BASE_API_URL}/repos/{owner}/{repo}/wiki/page/{encoded_page_name}"
    return url_json_get(base_url)


def gitea_wiki_pages_get(owner, repo, verbose=True):
    """
    Get all wiki pages.
    """
    base_url = f"{BASE_API_URL}/repos/{owner}/{repo}/wiki/pages"
    return url_json_get(base_url)
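A minimal usage sketch of these helpers, run from the repository root so `routers` is importable; the `Type/Report` label value is a hypothetical example:

from routers.utils_gitea import (gitea_fetch_issues, gitea_wiki_page_get,
                                 gitea_wiki_pages_get)

# Fetch only the attributes we need from open issues with a given label.
issues = gitea_fetch_issues('blender', 'blender', state='open',
                            labels='Type/Report',
                            issue_attr_filter={'number', 'title', 'body'})
print(f"Fetched {len(issues)} issues")

# Enumerate wiki pages, then fetch one page's content.
pages = gitea_wiki_pages_get('blender', 'blender')
first_page = gitea_wiki_page_get('blender', 'blender', pages[0]['title'])
print(first_page['sub_url'])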
static/favicon.ico
ADDED
static/privace.txt
ADDED
@@ -0,0 +1,35 @@
**Privacy Policy for Blender Assistant**

Effective Date: November 24, 2023

This Privacy Policy outlines the information we do not collect and provides an overview of the usage of the Blender Assistant and the endpoints accessible at `https://mano-wii-function-calling.hf.space/api/v1`.

### Information We Do Not Collect

Blender Assistant does not collect the following information:

- **Conversations**: The chat API connects directly to OpenAI's servers, so only OpenAI has access to the conversations.
- **Personally Identifiable Information**: The API does not gather personally identifiable information such as IP addresses or email addresses.
- **Cookies**: The API does not utilize cookies.

### API Description

The API provides a chat interface and tools that assist Blender users.

### Tools

- "/gpu_checker": Determine whether a GPU model is supported by Blender.
- "/bpy_doc": Return the documentation for a bpy Python object, or an error description if the object is not supported.
- "/get_issue": Get the title, body, user and assets of the current report on the page.
- "/get_messages": Get a list of all messages in the report.

### Your Consent

By using the Blender Assistant API, you consent to our privacy policy as described herein.

### Changes to Our Privacy Policy

If we decide to modify our privacy policy, we will update it here and provide the revised version.

This document is CC-BY-SA. It was last updated on November 19, 2023.

For any questions or concerns regarding this privacy policy or the Blender 3D Information API, please contact us at grmncv@gmail.com.
utils/generate_bpy_doc.py
ADDED
@@ -0,0 +1,224 @@
import bpy
import inspect
import time
import pickle
import mathutils
import os
import bpy_types
import addon_utils
import sys

INFO_MEMBER = "__info"


def get_info(name="", descr="", bases=None):
    return {"name": name,
            "descr": descr,
            "bases": bases}

##################################################################


g_bpy_types = {}


def doc_from_bpy_struct(bl_rna):
    bases = []
    try:
        base = bl_rna.base
        while base:
            bases.append(type(base).__name__)
            base = base.base
    except:
        if not bases:
            bases = None

    return get_info(name=bl_rna.name, descr=bl_rna.description, bases=bases)


def bpy_type_first_step(bpy_type):
    def is_member_from_base_class(bpy_type, identifier):
        if identifier in bpy.types.ID.bl_rna.properties:
            return True

        bases = bpy_type.mro()[1:]
        for base in bases:
            if not hasattr(base, "bl_rna"):
                continue
            if identifier in base.bl_rna.properties:
                return True
        return False

    info = doc_from_bpy_struct(bpy_type.bl_rna)
    data = {INFO_MEMBER: info}
    for prop in bpy_type.bl_rna.properties:
        identifier = prop.identifier
        if is_member_from_base_class(bpy_type, identifier):
            continue
        if prop.type == 'POINTER':
            srna_type = prop.fixed_type.identifier
            try:
                pointer_type = getattr(bpy.types, srna_type)
                data[identifier] = pointer_type
            except Exception:
                pass
            continue
        if prop.type == 'COLLECTION':
            if prop.srna:
                srna_type = prop.srna.identifier
                pointer_type = getattr(bpy.types, srna_type)
                data[identifier] = pointer_type
            elif srna_type := prop.fixed_type.identifier:
                pointer_type = getattr(bpy.types, srna_type)
                data[identifier] = [pointer_type]
            continue

        info_member = doc_from_bpy_struct(prop)
        data[identifier] = {INFO_MEMBER: info_member}

    return data


def bpy_types_first_step():
    global g_bpy_types
    for bpy_type_name in dir(bpy.types):
        bpy_type = getattr(bpy.types, bpy_type_name)
        if not hasattr(bpy_type, "bl_rna"):
            continue
        g_bpy_types[bpy_type] = bpy_type_first_step(bpy_type)


def bpy_types_second_step():
    global g_bpy_types
    for bpy_type, map in g_bpy_types.items():
        for key, val in map.items():
            if hasattr(val, "bl_rna"):
                map[key] = g_bpy_types[val]
            elif isinstance(val, list):
                val[0] = g_bpy_types[val[0]]

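The two functions above build the documentation map in two passes: the first creates one entry per RNA type while still holding raw `bpy.types` references, and the second replaces those references with the entries themselves, so that cyclic references between types resolve cleanly. A minimal illustration of the same pattern, using a hypothetical type graph:

# Hypothetical cyclic type graph: A points to B, B points back to A.
types = {"A": ["B"], "B": ["A"]}

# First step: one node per type, still holding type names.
nodes = {name: {"refs": list(refs)} for name, refs in types.items()}

# Second step: replace names with the nodes themselves.
for node in nodes.values():
    node["refs"] = [nodes[ref] for ref in node["refs"]]

assert nodes["A"]["refs"][0] is nodes["B"]  # cycle resolved in place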
##################################################################

bases_builtin = {int, bool, float, str, bytes, tuple, list,
                 set, dict, mathutils.Vector, mathutils.Color, type(None)}


def is_member_inherited(obj, member):
    mro_bases = inspect.getmro(type(obj))
    mro_bases_set = set(mro_bases)
    intersection = mro_bases_set.intersection(bases_builtin)
    for base in intersection:
        if hasattr(base, member):
            return True
    return False


def get_doc_recursive(parent, member):
    ob = getattr(parent, member)
    member_info = getattr(type(parent), member, ob)
    if type(member_info) in bases_builtin or member == "bpy_func":
        descr = type(member_info).__name__
        return {INFO_MEMBER: get_info(descr=descr)}

    if hasattr(type(ob), "bl_rna"):
        return g_bpy_types[type(ob)]

    if "bl_rna" in dir(ob):
        return g_bpy_types[ob]

    result = {}
    descr = member_info.__doc__ if member_info.__doc__ else type(ob).__name__
    result[INFO_MEMBER] = get_info(descr=descr)

    for name in dir(ob):
        if name.startswith("_"):
            continue
        if is_member_inherited(ob, name):
            continue

        ob_member = getattr(ob, name, None)
        if ob_member == parent:
            descr = type(parent).__name__
            result[name] = {INFO_MEMBER: get_info(descr=descr)}
            continue
        # Skip modules that would drag in unrelated documentation.
        if ob_member == os:
            continue
        if ob_member == bpy:
            continue
        if ob_member == bpy_types:
            continue
        if ob_member == addon_utils:
            continue
        if ob_member == sys:
            continue
        if name == "addon_install":
            # This raises an error.
            continue

        result[name] = get_doc_recursive(ob, name)
    return result


##################################################################

def print_doc_recursive(map, indent, name, max_step=3):
    time.sleep(.5)
    prefix = indent * '|'
    print(prefix + name)
    for key, val in map.items():
        if key == INFO_MEMBER:
            # `val` is an info dict; stringify it before indenting.
            print(prefix + str(val).replace('\n', '\n' + prefix) + '\n' + prefix)
        elif indent < max_step:
            name_next = name + '.' + key
            if isinstance(val, list):
                print_doc_recursive(val[0], indent + 1,
                                    name_next + "[0]", max_step=max_step)
            else:
                print_doc_recursive(
                    val, indent + 1, name_next, max_step=max_step)


def main():
    print("-------------------------------------------------------------")
    bpy_types_first_step()
    bpy_types_second_step()

    members = (
        "app",
        "context",
        "data",
        "msgbus",
        "ops",
        "path",
        "props",
        "types",
        "utils",
    )

    result = {
        "bpy": {INFO_MEMBER: get_info(descr=bpy.__doc__)},
        "__info": {"bases": None},
    }
    for member in members:
        result["bpy"][member] = get_doc_recursive(bpy, member)

    # Reference some types at the beginning.
    result["bpy_struct"] = result["bpy"]["types"]["bpy_struct"]
    result["bpy_types"] = result["bpy"]["types"]

    if False:
        print(result["bpy"]["props"]["BoolProperty"])
        return

    # print_doc_recursive(result, 1, "bpy")
    bpy_doc_dir = "D:/Dev/function-calling/routers/bpy_doc_v41.pkl"
    with open(bpy_doc_dir, "wb") as file:
        # print(result["types"]["bpy_func"])
        pickle.dump(result, file, protocol=pickle.HIGHEST_PROTOCOL)

    print(f"File '{bpy_doc_dir}' has been updated.")


if __name__ == '__main__':
    main()
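Once generated, the pickle can be consumed without Blender. A minimal sketch, assuming the file was copied to the path the router uses (the `routers/tool_bpy_doc_v4_1.pkl` name comes from this commit's file list):

import pickle

with open("routers/tool_bpy_doc_v4_1.pkl", "rb") as file:
    doc = pickle.load(file)

# Each node is a dict whose "__info" member holds name/descr/bases;
# the remaining keys are child members.
info = doc["bpy"]["context"]["__info"]
print(info["descr"])
print(sorted(k for k in doc["bpy"] if k != "__info"))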