:gem: [Feature] Enable posting requests with express as backend

Files changed:
- .gitignore +2 -1
- networks/llm_requester.js +28 -31
- package.json +7 -0
- server.js +43 -0
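In short: the frontend used to reach the OpenAI endpoint through a CORS proxy derived from window.location (port 12349); this commit replaces that with an express backend on port 12345 that serves the static frontend and exposes a single POST /chat/completions route, relaying each request to the configured openai_endpoint via axios and piping the streamed response back to the browser.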
.gitignore
CHANGED
@@ -1 +1,2 @@
-
+node_modules
+package-lock.json
networks/llm_requester.js
CHANGED
@@ -24,53 +24,50 @@ export class ChatCompletionsRequester {
        prompt,
        model = null,
        temperature = null,
-
-        cors_proxy = null
+        openai_endpoint = null
    ) {
        this.prompt = prompt;
        this.model = model || get_selected_llm_model() || "gpt-turbo-3.5";
        this.temperature =
            temperature !== null ? temperature : get_selected_temperature();

-        this.
-
-
-        } else {
-            this.cors_proxy = window.location.href.replace(/\/*(:\d+)*\/?$/, "") + ":12349";
-        }
-        this.request_endpoint = concat_urls(
-            this.cors_proxy,
-            this.endpoint,
-            "/chat/completions"
-        );
+        this.openai_endpoint =
+            openai_endpoint || localStorage.getItem("openai_endpoint");
+        this.backend_request_endpoint = "/chat/completions";
        this.controller = new AbortController();
    }
-
-    this.
-
-
-    this.
+    construct_openai_request_headers() {
+        this.backend_request_headers = {
+            "Content-Type": "application/json",
+        };
+        this.openai_request_headers = {
            "Content-Type": "application/json",
            Authorization: `Bearer ${localStorage.getItem("openai_api_key")}`,
        };
    }
-
-    this.
-    this.
-
-
-
-
+    construct_backend_request_body() {
+        this.openai_request_messages = get_request_messages();
+        this.backend_request_body = {
+            openai_endpoint: this.openai_endpoint,
+            openai_request_method: "POST",
+            openai_request_headers: this.openai_request_headers,
+            openai_request_body: {
+                model: this.model,
+                messages: this.openai_request_messages,
+                temperature: this.temperature,
+                stream: true,
+            },
        };
    }
    construct_request_params() {
-        this.
-        this.
-        this.
+        this.construct_openai_request_headers();
+        this.construct_backend_request_body();
+        this.backend_request_params = {
            method: "POST",
-            headers: this.
-            body: JSON.stringify(this.
+            headers: this.backend_request_headers,
+            body: JSON.stringify(this.backend_request_body),
            signal: this.controller.signal,
+            stream: true,
        };
    }
    create_messager_components() {
@@ -79,7 +76,7 @@
    }
    post() {
        this.construct_request_params();
-        return fetch(this.
+        return fetch(this.backend_request_endpoint, this.backend_request_params)
            .then((response) => response.body)
            .then((rb) => {
                const reader = rb.getReader();
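Net effect on the requester: rather than fetching the OpenAI endpoint directly through a CORS proxy, it now posts a self-describing JSON envelope to its own backend. A sketch of what construct_backend_request_body() assembles; the field names come from the diff above, while every value here is an illustrative stand-in:

    // Illustrative envelope posted to the express backend at /chat/completions.
    // Field names match construct_backend_request_body() above; all values are
    // made-up examples, not defaults from the commit.
    const backend_request_body = {
        openai_endpoint: "https://api.openai.com/v1", // read from localStorage "openai_endpoint"
        openai_request_method: "POST",
        openai_request_headers: {
            "Content-Type": "application/json",
            Authorization: "Bearer sk-...", // read from localStorage "openai_api_key"
        },
        openai_request_body: {
            model: "gpt-turbo-3.5",
            messages: [{ role: "user", content: "Hello!" }],
            temperature: 1.0,
            stream: true,
        },
    };

One caveat: stream: true inside backend_request_params is not a standard fetch() init option, so the browser most likely ignores it; the streaming behavior actually comes from reading response.body incrementally, as post() does.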
package.json
ADDED
@@ -0,0 +1,7 @@
+{
+    "dependencies": {
+        "axios": "^1.6.2",
+        "body-parser": "^1.20.2",
+        "express": "^4.18.2"
+    }
+}
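With the manifest in place, a standard npm install pulls in the three runtime dependencies: express for the HTTP server and static hosting, body-parser to decode the JSON envelope, and axios for the upstream call. Running node server.js (below) then starts the backend on port 12345.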
server.js
ADDED
@@ -0,0 +1,43 @@
+const express = require("express");
+const axios = require("axios");
+const bodyParser = require("body-parser");
+const path = require("path");
+
+const app = express();
+
+app.use(express.static(path.join(__dirname, ".")));
+app.use(bodyParser.json());
+
+app.get("/", (req, res) => {
+    res.sendFile(path.join(__dirname + "/index.html"));
+});
+
+app.post("/chat/completions", async (req, res) => {
+    try {
+        const {
+            openai_endpoint,
+            openai_request_method,
+            openai_request_headers,
+            openai_request_body,
+        } = req.body;
+
+        const response = await axios({
+            method: openai_request_method,
+            url: openai_endpoint + "/chat/completions",
+            data: openai_request_body,
+            headers: openai_request_headers,
+            responseType: "stream",
+        });
+
+        response.data.pipe(res);
+    } catch (error) {
+        console.error(error);
+        console.log(error.response.data);
+        res.status(500).json({ error: "Error calling OpenAI API" });
+    }
+});
+
+const port = 12345;
+app.listen(port, () => {
+    console.log(`Server is running on http://localhost:${port}`);
+});
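Two caveats in the catch block: error.response is only set when the upstream actually answered, so a plain network failure would make the console.log line itself throw (error.response?.data would be safer), and with responseType: "stream" the logged error.response.data is a stream object rather than parsed JSON. To exercise the route end to end, a minimal smoke test can post an envelope like the one sketched above and print the streamed chunks; this sketch assumes Node 18+ (global fetch) and an OPENAI_API_KEY environment variable, with endpoint, model, and prompt as illustrative values:

    // Hedged smoke test for server.js -- not part of the commit.
    // Start the backend first: node server.js
    const envelope = {
        openai_endpoint: "https://api.openai.com/v1", // illustrative
        openai_request_method: "POST",
        openai_request_headers: {
            "Content-Type": "application/json",
            Authorization: `Bearer ${process.env.OPENAI_API_KEY}`, // assumed env var
        },
        openai_request_body: {
            model: "gpt-3.5-turbo", // illustrative model name
            messages: [{ role: "user", content: "Say hello." }],
            temperature: 1.0,
            stream: true,
        },
    };

    fetch("http://localhost:12345/chat/completions", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(envelope),
    })
        .then((response) => response.body)
        .then(async (rb) => {
            const reader = rb.getReader();
            const decoder = new TextDecoder();
            // Drain the piped stream chunk by chunk until the upstream closes it.
            while (true) {
                const { done, value } = await reader.read();
                if (done) break;
                process.stdout.write(decoder.decode(value));
            }
        });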