replaced by llama3
server.js CHANGED
@@ -2,6 +2,7 @@ const express = require('express')
 const app = express()
 const port = 8080
 
+process.on('uncaughtException', err => console.log('JayCoach:Exception:', err))
 
 const hfToken = process.env.HF_TOKEN
 
@@ -85,12 +86,13 @@ async function Prompt(error, tentativas){
 
 ,options:{
 use_cache: false
+,wait_for_model: false
 }
 
 }
 
 const response = await fetch(
-"https://api-inference.huggingface.co/models/
+"https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct",
 {
 headers: { Authorization: "Bearer "+hfToken, "content-type":"application/json" },
 method: "POST",
@@ -131,6 +133,12 @@ app.get('/test', async (req, res) => {
 res.send("Working!")
 })
 
+app.use(function(err, req, res, next) {
+console.error(err.stack);
+res.json({error:'Server error, admin must check logs',status:res.status})
+});
+
+
 app.listen(port, () => {
 console.log(`JayCoach running`)
 })
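
For context, a minimal sketch of how the Prompt() request plausibly fits together after this commit. The model URL, headers, method and the use_cache/wait_for_model options are taken directly from the diff; the inputs field and the body: JSON.stringify(...) wiring are assumptions about code the hunks do not show.

// Sketch only: the payload shape beyond "options" is an assumption, not part of the commit.
async function Prompt(error, tentativas) {
  const payload = {
    inputs: error,                  // assumed: prompt text built from the error
    options: {
      use_cache: false,             // do not reuse a cached generation
      wait_for_model: false         // do not queue the request while the model cold-starts
    }
  }

  const response = await fetch(
    "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct",
    {
      headers: { Authorization: "Bearer " + hfToken, "content-type": "application/json" },
      method: "POST",
      body: JSON.stringify(payload) // assumed: body is the serialized payload
    }
  )
  return response.json()
}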
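
The commit also adds two global safety nets: a process-level uncaughtException logger and an Express error-handling middleware. Express recognizes an error handler by its four-argument signature, and it must be registered after the routes, which is where the hunk places it. A minimal sketch of the same idea with an explicit 500 status (the explicit status code is an assumption, not in the commit):

// Sketch of an Express error handler; the explicit 500 status is an assumption.
app.use(function (err, req, res, next) {
  console.error(err.stack)  // log the stack so the admin can check the Space logs
  res.status(500).json({ error: 'Server error, admin must check logs' })
})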