:gem: [Feature] Get available models via backend

- networks/llm_requester.js +54 -16
- server.js +22 -1
networks/llm_requester.js
CHANGED

```diff
@@ -98,22 +98,60 @@ export class ChatCompletionsRequester {
     }
 }
 
+
 export var available_models = [];
-export …
-…
-                console.error("Error:", error);
+export class AvailableModelsRequester {
+    constructor(
+        openai_endpoint = null
+    ) {
+        this.openai_endpoint =
+            openai_endpoint || localStorage.getItem("openai_endpoint");
+        this.backend_request_endpoint = "/models";
+        this.controller = new AbortController();
+    }
+    construct_openai_request_headers() {
+        this.backend_request_headers = {
+            "Content-Type": "application/json",
+        };
+        this.openai_request_headers = {
+            "Content-Type": "application/json",
+        };
+    }
+    construct_backend_request_body() {
+        this.backend_request_body = {
+            openai_endpoint: this.openai_endpoint,
+            openai_request_method: "GET",
+            openai_request_headers: this.openai_request_headers,
+        };
+    }
+    construct_request_params() {
+        this.construct_openai_request_headers();
+        this.construct_backend_request_body();
+        this.backend_request_params = {
+            method: "POST",
+            headers: this.backend_request_headers,
+            body: JSON.stringify(this.backend_request_body),
+            signal: this.controller.signal,
+        };
+    }
+    get() {
+        this.construct_request_params();
+        return fetch(this.backend_request_endpoint, this.backend_request_params)
+            .then((response) => response.json())
+            .then((response_json) => {
+                response_json.forEach((item) => {
+                    if (!(item.id in available_models)) {
+                        available_models.push(item.id);
+                    }
+                });
+                available_models.sort();
+                console.log(available_models);
+            })
+            .catch((error) => {
+                console.error("Error:", error);
             });
+    }
+    stop() {
+        this.controller.abort();
+    }
 }
```
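For context, here is a hypothetical usage sketch (not part of the commit) showing how a frontend might drive `AvailableModelsRequester` to fill a model dropdown. The `model-select` element id is an assumption, and the sketch presumes the upstream `/models` response is a bare array so that `available_models` gets populated by `get()` as written.

```javascript
// Hypothetical usage sketch, not part of this commit.
// Assumes a <select id="model-select"> exists in the page and that
// localStorage already holds an "openai_endpoint" value.
import {
    available_models,
    AvailableModelsRequester,
} from "./networks/llm_requester.js";

const requester = new AvailableModelsRequester();
requester.get().then(() => {
    const select = document.getElementById("model-select");
    select.innerHTML = "";
    for (const model_id of available_models) {
        // new Option(text, value) is the built-in shorthand for <option>
        select.add(new Option(model_id, model_id));
    }
});

// Cancel an in-flight request, e.g. when the user edits the endpoint:
// requester.stop();
```

Two caveats about the committed `get()`: `item.id in available_models` tests array indices rather than values, so `available_models.includes(item.id)` would be the conventional de-duplication check; and OpenAI-style endpoints wrap the model list as `{ "object": "list", "data": [...] }`, in which case the loop would need to iterate `response_json.data` instead of the JSON root.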
server.js
CHANGED

```diff
@@ -32,7 +32,28 @@ app.post("/chat/completions", async (req, res) => {
         response.data.pipe(res);
     } catch (error) {
         console.error(error);
-        res.status(500).json({ error: "Failed to …
+        res.status(500).json({ error: "Failed to request OpenAI Endpoint" });
+    }
+});
+
+app.post("/models", async (req, res) => {
+    try {
+        const {
+            openai_endpoint,
+            openai_request_method,
+            openai_request_headers,
+        } = req.body;
+
+        const response = await axios({
+            method: openai_request_method,
+            url: openai_endpoint + "/models",
+            headers: openai_request_headers,
+        });
+        res.json(response.data);
+
+    } catch (error) {
+        console.error(error);
+        res.status(500).json({ error: "Failed to request OpenAI Endpoint" });
     }
 });
 
```
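To make the relay contract concrete, here is a hypothetical client call against the new route (not part of the commit): the server forwards the request as a GET to `${openai_endpoint}/models` via axios and returns the upstream JSON unchanged. The endpoint URL below is a placeholder, and a real OpenAI endpoint would additionally require an `Authorization` header, which the committed request headers do not yet carry.

```javascript
// Hypothetical request to the new /models relay, not part of this commit.
const response = await fetch("/models", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
        openai_endpoint: "https://api.openai.com/v1", // placeholder endpoint
        openai_request_method: "GET",
        openai_request_headers: { "Content-Type": "application/json" },
    }),
});
const models = await response.json(); // e.g. { "object": "list", "data": [...] }
console.log(models);
```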