Radosław Wolnik committed on
Commit
fdd450a
·
1 Parent(s): 99e2e9c
Files changed (4)
  1. .env.ini +1 -1
  2. .gitignore +14 -3
  3. app.py +6 -3
  4. project_structure_plan.md +329 -0
.env.ini CHANGED
@@ -1 +1 @@
- DISCORD_TOKEN_CHATTER=MTMzODQ4NTY2MzY3MDA3OTQ4OA.GbLJQg.BB2iExwzma7AvJ2vlybg8cAcAsheGWfU0Ker4M
+ DISCORD_TOKEN_CHATTER='MTMzODQ4NTY2MzY3MDA3OTQ4OA.GbLJQg.BB2iExwzma7AvJ2vlybg8cAcAsheGWfU0Ker4M'
.gitignore CHANGED
@@ -1,4 +1,15 @@
- /.idea/
- /.Chatter/
+ ./.idea/
+ P:/AiderDev/.idea
+ ./.chatter/
+
  .aider*
- .env
+ .env.ini
+ /.idea/.gitignore
+ /.idea/AiderProjectSettings.xml
+ /.idea/Chatter.iml
+ /.idea/encodings.xml
+ /.idea/misc.xml
+ /.idea/modules.xml
+ /.idea/inspectionProfiles/profiles_settings.xml
+ /.idea/inspectionProfiles/Project_Default.xml
+ /.idea/vcs.xml
app.py CHANGED
@@ -95,7 +95,10 @@ async def on_ready():
  import os
  from dotenv import load_dotenv
- load_dotenv()
- DISCORD_TOKEN_CHATTER = str(os.getenv("DISCORD_TOKEN_CHATTER"))
+ env_path = os.path.join(os.path.dirname(__file__), ".env.ini")
+ load_dotenv(env_path)
+ str(os.getenv("DISCORD_TOKEN_CHATTER"))
+ print(str(os.getenv("DISCORD_TOKEN_CHATTER")))

- bot.run(DISCORD_TOKEN_CHATTER)
+
+ #bot.run(DISCORD_TOKEN_CHATTER)
project_structure_plan.md ADDED
@@ -0,0 +1,329 @@
1. Project Structure

First, let's organize the project into logical modules. Here's a suggested directory structure:

```text
Chatter/
├── .gitignore
├── .env
├── requirements.txt
├── app.py
├── ChatAI/
│   ├── __init__.py
│   └── chat_ai.py
├── core/
│   ├── __init__.py
│   ├── bot.py
│   ├── message_handler.py
│   └── utils.py
├── logging/
│   ├── __init__.py
│   └── handlers.py
├── utils/
│   ├── __init__.py
│   ├── constants.py
│   └── decorators.py
└── .env.local
```

2. Move Code to Modules

Let's break down the existing code into these modules:

a) core/bot.py

Move the bot initialization, command setup, and basic functionality here.

```python
import discord
from discord.ext import commands

intents = discord.Intents.default()
intents.message_content = True  # required to read message text with discord.py 2.x
bot = commands.Bot(command_prefix="!", intents=intents)

# Message counts per channel, filled in by the message handler
message_counts = {}

def setup_commands():
    # Command definitions
    pass

@bot.event
async def on_ready():
    # Bot initialization and setup
    print(f'Logged in as {bot.user}')
    setup_commands()

# The on_message handler lives in core/message_handler.py (section b)
```

b) core/message_handler.py

Move the message handling logic here.

```python
from .bot import bot, message_counts
from .utils import split_string, generate, get_default_channel

@bot.event
async def on_message(message):
    guild = message.guild
    channel = get_default_channel(guild)

    if message.channel != channel or message.author.bot:
        return

    if message.channel.id not in message_counts:
        message_counts[message.channel.id] = 0

    message_counts[message.channel.id] += 1
    print(message_counts[message.channel.id])

    await respond_to_chat(message.content, channel)

    if message_counts[message.channel.id] >= 10:
        messages = [message.content]
        # Get the previous messages from the channel history
        async for msg in channel.history(limit=10):
            messages.append(msg.content)
        await respond_to_chat("\n".join(messages), channel)
        message_counts[message.channel.id] = 0

async def respond_to_chat(content: str, channel) -> str:
    response = generate(content)
    parts = split_string(response)
    # Send the parts to the channel one message at a time
    await send_split_message(parts, channel)
    return "\n".join(parts)

async def send_split_message(parts, channel):
    for part in parts:
        await channel.send(part)
```

c) core/utils.py

Move the utility functions here.

```python
import random

import discord
from huggingface_hub import InferenceClient

# Shared inference client for the MistralAI model
ai = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

def split_string(text: str) -> list[str]:
    """Helper function to split text into chunks"""
    return [text[i:i+1900] for i in range(0, len(text), 1900)]

def generate(
    prompt, temperature=0.0, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0
):
    """Generate a response using the MistralAI model"""
    if temperature == 0.0:
        temperature = random.uniform(1e-2, 0.9)
    temperature = float(temperature)

    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    return ai.text_generation(prompt, **generate_kwargs, stream=False, details=False,
                              return_full_text=True)

def generate_response(prompt):
    """Generate and format a response"""
    response = generate(prompt)
    formatted_response = "\n".join(response)
    return formatted_response

def get_default_channel(guild):
    """Return the channel the bot is allowed to talk in"""
    return discord.utils.get(guild.channels, name="PrzebieralniaKoedukacyjna")
```

d) ChatAI/chat_ai.py

Move the AI configuration here.

```python
from huggingface_hub import InferenceClient

class ChatAI:
    def __init__(self):
        self.pipe = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

    async def generate(self, prompt):
        # InferenceClient exposes text_generation() rather than a generic generate()
        return self.pipe.text_generation(prompt)
```

3. Dependency Management

Update requirements.txt and move to a modern package manager like Poetry:

requirements.txt

Replace the current contents:

```text
transformers
huggingface_hub==0.25.2
discord.py~=2.4.0
discord~=2.3.2
torch
six
python-dotenv~=1.0.1
huggingface-hub~=0.25.2
```

with:

```text
discord~=2.4.0
discord.py~=2.4.0
transformers
huggingface_hub>=0.25.0
torch
six
python-dotenv>=1.0.0
poetry
```

Run:

```bash
poetry install
```

4. Add Logging

Create a logging module:

utils/logging.py

```python
import logging

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler('chat.log')]
)

logger = logging.getLogger(__name__)
```
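
Other modules can then reuse this shared logger instead of configuring logging themselves; a minimal usage sketch, assuming utils/ is an importable package:

```python
# e.g. at the top of core/message_handler.py
from utils.logging import logger

logger.info("Message handler loaded")
```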

5. Initialize the Bot

Create a main.py to initialize and run the bot:

main.py

```python
import asyncio
import logging
import os

from dotenv import load_dotenv

from core.bot import bot
from ChatAI.chat_ai import ChatAI
import core.message_handler  # registers the on_message handler on the bot

# Initialize logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler('chat.log')]
)

logger = logging.getLogger(__name__)

# Initialize AI
ai = ChatAI()

async def run_bot():
    # discord.py dispatches events and commands itself once the bot is started
    load_dotenv(os.path.join(os.path.dirname(__file__), ".env.ini"))
    token = os.getenv("DISCORD_TOKEN_CHATTER")
    try:
        logger.info("Starting bot")
        await bot.start(token)
    except Exception as e:
        logger.error(f"Error while running bot: {e}")
    finally:
        logger.info("Bot shutdown complete")

if __name__ == "__main__":
    asyncio.run(run_bot())
```

6. Testing

Add tests to ensure the bot works as expected.

```text
tests/
├── .gitignore
├── test_message_handler.py
└── test_chat_ai.py
```
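
A minimal pytest sketch for the message-splitting helper, assuming core/utils.py from step 2 (test_chat_ai.py would stub out the InferenceClient in a similar way):

```python
# tests/test_message_handler.py
from core.utils import split_string

def test_split_string_respects_discord_limit():
    text = "x" * 4000
    parts = split_string(text)
    # Discord rejects messages over 2000 characters; chunks are capped at 1900
    assert all(len(part) <= 1900 for part in parts)
    assert "".join(parts) == text

def test_split_string_short_text_is_single_chunk():
    assert split_string("hello") == ["hello"]
```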

7. Deployment

For production, consider:

- Using a server (like Heroku or Railway)
- Implementing proper security
- Adding monitoring
- Implementing rate limiting (see the sketch after this list)
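
For rate limiting specifically, discord.py's built-in command cooldowns are one option; a sketch under the assumption that a !chat command is added to the bot from core/bot.py (the command itself is hypothetical):

```python
# Hypothetical !chat command with a per-user cooldown (1 use every 10 seconds)
from discord.ext import commands

from core.bot import bot

@bot.command(name="chat")
@commands.cooldown(1, 10.0, commands.BucketType.user)
async def chat(ctx, *, prompt: str):
    await ctx.send(f"Thinking about: {prompt[:100]}")

@chat.error
async def chat_error(ctx, error):
    # Tell the user how long to wait instead of failing silently
    if isinstance(error, commands.CommandOnCooldown):
        await ctx.send(f"Slow down! Try again in {error.retry_after:.1f}s.")
```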

8. Documentation

Add documentation to your project for future developers.

9. Deployment Setup

Set up a deployment build with:

```bash
poetry build
```

Then deploy to your chosen platform.

Summary

This structured approach organizes the project into modular components, manages dependencies effectively, and provides logging and error handling for reliability. By following these steps, you'll have a maintainable, scalable, and robust project structure.