robinroy03 commited on
Commit
dca8b19
1 Parent(s): 7417f0a

made the bot async, fixed the bug where it goes offline

Browse files
Files changed (2) hide show
  1. .gitignore +2 -1
  2. app.py +21 -18
.gitignore CHANGED
@@ -1,2 +1,3 @@
1
  .env
2
- venv/
 
 
1
  .env
2
+ venv/
3
+ learn.py
app.py CHANGED
@@ -1,8 +1,7 @@
1
  import discord
 
2
 
3
  import os
4
- import json
5
- import requests
6
  import threading
7
 
8
 
@@ -34,7 +33,7 @@ async def help(ctx: discord.ApplicationContext):
34
  \n2)Those that tag the bot.")
35
 
36
 
37
- def llm_output(question: str, context: str) -> str:
38
  """
39
  Returns output from the LLM using the given user-question and retrieved context
40
  """
@@ -54,14 +53,15 @@ def llm_output(question: str, context: str) -> str:
54
  'prompt': prompt,
55
  'stream': False
56
  }
57
-
58
- response = requests.post(URL_LLM + "/api/generate", json=obj)
59
- response_json = json.loads(response.text)
60
-
 
61
  return response_json['response']
62
 
63
 
64
- def embedding_output(message: str) -> list:
65
  """
66
  Returns embeddings for the given message
67
 
@@ -70,13 +70,14 @@ def embedding_output(message: str) -> list:
70
 
71
  URL_EMBEDDING = 'https://robinroy03-fury-embeddings-endpoint.hf.space'
72
 
73
- response = requests.post(URL_EMBEDDING + "/embedding", json={"text": message})
74
- response_json = json.loads(response.text)
75
-
 
76
  return response_json['output']
77
 
78
 
79
- def db_output(embedding: list) -> dict:
80
  """
81
  Returns the KNN results.
82
 
@@ -85,9 +86,10 @@ def db_output(embedding: list) -> dict:
85
 
86
  URL_DB = 'https://robinroy03-fury-db-endpoint.hf.space'
87
 
88
- response = requests.post(URL_DB + "/query", json={"embeddings": embedding})
89
- response_json = json.loads(response.text)
90
-
 
91
  return response_json
92
 
93
 
@@ -104,9 +106,10 @@ async def on_message(message):
104
  await message.reply(content="Your message was received, it'll take around 30 seconds for FURY to process an answer.")
105
 
106
  question = message.content.replace("<@1243428204124045385>", "")
107
- embedding: list = embedding_output(question)
108
- db_knn: dict = db_output(embedding)
109
- llm_answer: str = llm_output(question, db_knn['matches'][0]['metadata']['text']) # for the highest knn result (for the test only right now) TODO: make this better
 
110
 
111
  try:
112
  await message.reply(content=llm_answer[:1990], view=Like_Dislike()) # TODO: handle large responses (>2000)
 
1
  import discord
2
+ import aiohttp
3
 
4
  import os
 
 
5
  import threading
6
 
7
 
 
33
  \n2)Those that tag the bot.")
34
 
35
 
36
+ async def llm_output(question: str, context: str) -> str:
37
  """
38
  Returns output from the LLM using the given user-question and retrieved context
39
  """
 
53
  'prompt': prompt,
54
  'stream': False
55
  }
56
+
57
+ async with aiohttp.ClientSession() as session:
58
+ async with session.post(URL_LLM + "/api/generate", json=obj) as response:
59
+ response_json = await response.json()
60
+
61
  return response_json['response']
62
 
63
 
64
+ async def embedding_output(message: str) -> list:
65
  """
66
  Returns embeddings for the given message
67
 
 
70
 
71
  URL_EMBEDDING = 'https://robinroy03-fury-embeddings-endpoint.hf.space'
72
 
73
+ async with aiohttp.ClientSession() as session:
74
+ async with session.post(URL_EMBEDDING + "/embedding", json={"text": message}) as response:
75
+ response_json = await response.json()
76
+
77
  return response_json['output']
78
 
79
 
80
+ async def db_output(embedding: list) -> dict:
81
  """
82
  Returns the KNN results.
83
 
 
86
 
87
  URL_DB = 'https://robinroy03-fury-db-endpoint.hf.space'
88
 
89
+ async with aiohttp.ClientSession() as session:
90
+ async with session.post(URL_DB + "/query", json={"embeddings": embedding}) as response:
91
+ response_json = await response.json()
92
+
93
  return response_json
94
 
95
 
 
106
  await message.reply(content="Your message was received, it'll take around 30 seconds for FURY to process an answer.")
107
 
108
  question = message.content.replace("<@1243428204124045385>", "")
109
+
110
+ embedding: list = await embedding_output(question)
111
+ db_knn: dict = await db_output(embedding)
112
+ llm_answer: str = await llm_output(question, db_knn['matches'][0]['metadata']['text']) # for the highest knn result (for the test only right now) TODO: make this better
113
 
114
  try:
115
  await message.reply(content=llm_answer[:1990], view=Like_Dislike()) # TODO: handle large responses (>2000)