File size: 3,163 Bytes
3a0abc1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a06b399
3a0abc1
a06b399
3a0abc1
a06b399
b1ff179
 
a06b399
 
e17b407
3a0abc1
b1ff179
e17b407
a06b399
134d4df
e17b407
a06b399
3a0abc1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a06b399
3a0abc1
 
b5685e6
3a59da7
cdbd107
 
3a59da7
 
cdbd107
 
3a59da7
 
3a0abc1
a06b399
3a0abc1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import discord
import aiohttp

import ast
import os
import threading

# Enable the privileged message-content intent so on_message can read text.
intents = discord.Intents.default()
intents.message_content = True
bot = discord.Bot(intents = intents)  # py-cord application bot
token = os.environ.get('TOKEN_DISCORD')  # bot token from the environment; None if unset


class Like_Dislike(discord.ui.View):
    """Feedback view with like/dislike buttons attached to the bot's replies."""

    @discord.ui.button(style=discord.ButtonStyle.primary, emoji="πŸ‘")
    async def like_button(self, button, interaction):
        # Acknowledge the click; the feedback is not persisted anywhere.
        await interaction.response.send_message("You liked the response")

    @discord.ui.button(style=discord.ButtonStyle.primary, emoji="πŸ‘Ž")
    async def dislike_button(self, button, interaction):
        await interaction.response.send_message("You disliked the response")


@bot.event
async def on_ready():
    """Log a startup line once the gateway connection is established."""
    startup_message = f"{bot.user} is ready and online!"
    print(startup_message)


@bot.slash_command(name="help", description="list of commands and other info.")
async def help(ctx: discord.ApplicationContext):
    """Respond with a short usage summary for the bot.

    Note: the original used backslash line-continuations *inside* the
    string literal, which embedded the source indentation whitespace
    into the message shown to users. Implicit string concatenation
    produces the intended clean text.
    """
    await ctx.respond(
        "Hello! FURY Bot responds to all your messages"
        "\n1)Inside Forum channel and"
        "\n2)Those that tag the bot."
    )


async def llm_output(question: str) -> tuple[str, str]:
    """Query the FURY LLM engine and return ``(answer, references)``.

    Parameters
    ----------
    question : str
        The user's question, with the bot mention already stripped.

    Returns
    -------
    tuple[str, str]
        The model's answer and its references. On a non-200 response
        the answer is an error string and the references are empty, so
        callers can always unpack exactly two values (the original
        returned a bare str here, contradicting the annotation and
        crashing the caller's tuple unpack).
    """
    URL_LLM = 'https://robinroy03-fury-engine.hf.space'
    payload = {
        "query": question,
        "llm": "gemini-1.5-pro",
        "knn": 10,
        "stream": False
    }

    async with aiohttp.ClientSession() as session:
        async with session.post(URL_LLM + "/api/google/generate", json=payload) as response:
            # Treat any non-OK status (not only 500) as an engine failure.
            if response.status != 200:
                return f"Error {response.status}", ""
            # content_type=None: the endpoint may not declare application/json.
            response_json = await response.json(content_type=None)

    return response_json['response'], response_json['references']


@bot.event
async def on_message(message):
    """Reply to messages that mention the bot with an LLM-generated answer.

    Long answers are split into chunks of at most 1990 characters
    (Discord caps messages at 2000); the like/dislike feedback view is
    attached only to the final chunk.
    """
    # Ignore our own messages and anything that doesn't mention the bot.
    if (message.author == bot.user) or not bot.user.mentioned_in(message):
        return

    print(message.content)
    await message.reply(content="Your message was received, it'll take around 30 seconds for FURY to process an answer.")

    # Strip the bot mention dynamically (both the plain <@id> and the
    # nickname <@!id> forms) instead of a hard-coded user id, so this
    # works for any bot account the token belongs to.
    question = message.content.replace(f"<@{bot.user.id}>", "").replace(f"<@!{bot.user.id}>", "")
    engine_response = await llm_output(question)

    try:
        llm_answer, references = engine_response
        llm_answer += f"\n\n**References**\n{references}"
        CHUNK = 1990
        chunks = [llm_answer[i:i + CHUNK] for i in range(0, len(llm_answer), CHUNK)]
        for index, chunk in enumerate(chunks):
            if index == len(chunks) - 1:
                # Last chunk carries the feedback buttons.
                await message.reply(content=chunk, view=Like_Dislike())
            else:
                await message.reply(content=chunk)
    except Exception as e:                                                           # TODO: add logging
        print(e)
        await message.reply("An error occurred. Retry again. Try different prompts.")

def run_bot():
    """Start the Discord bot; this call blocks until the client closes."""
    bot.run(token)

# ===========================================================================================================================================================

from flask import Flask

# Minimal Flask app used as a keep-alive HTTP endpoint (e.g. so a hosting
# platform's health checks keep the process running).
app = Flask(__name__)

@app.route("/")
def home():
    # Health-check route confirming the process is up.
    return "The bot is online."


# Run the Discord bot on a background thread so Flask can own the main
# thread; app.run() blocks while serving the keep-alive endpoint.
threading.Thread(target=run_bot).start()
app.run()