Quardo committed on
Commit
394915e
1 Parent(s): 6962b8c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +240 -189
app.py CHANGED
@@ -1,208 +1,238 @@
1
- from starlette.responses import HTMLResponse
2
- from fastapi import FastAPI, Request
3
- from typing import List
4
- import gradio as gr
5
- import requests
 
6
  import argparse
 
 
 
7
  import aiohttp
8
  import uvicorn
9
- import random
10
- import string
11
- import json
12
- import math
13
- import sys
14
- import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
- API_BASE = "env"
17
- api_key = os.environ['API_KEY']
18
- oai_api_key = os.environ['OPENAI_API_KEY']
19
- base_url = os.environ.get('OPENAI_BASE_URL', "https://api.openai.com/v1")
20
- def_models = '["chatgpt-4o-latest", "gpt-4-0125-preview", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-mini-2024-07-18", "gpt-4o-mini", "gpt-4o"]'
 
 
21
 
22
- def checkModels():
23
- global base_url
24
  if API_BASE == "env":
25
  try:
26
- response = requests.get(f"{base_url}/models", headers={"Authorization": f"Bearer {get_api_key()}"})
 
 
 
27
  response.raise_for_status()
28
- if not ('data' in response.json()):
29
- base_url = "https://api.openai.com/v1"
30
- api_key = oai_api_key
31
- except Exception as e:
32
- print(f"Error testing API endpoint: {e}")
 
 
 
 
 
 
33
  else:
34
- base_url = "https://api.openai.com/v1"
35
- api_key = oai_api_key
 
36
 
37
- def loadModels():
38
- global models, modelList
39
- models = json.loads(def_models)
40
- models = sorted(models)
41
-
42
- modelList = {
43
  "object": "list",
44
- "data": [{"id": v, "object": "model", "created": 0, "owned_by": "system"} for v in models]
45
  }
 
46
 
47
- def handleApiKeys():
48
- global api_key
49
- if ',' in api_key:
50
- output = []
51
- for key in api_key.split(','):
52
- try:
53
- response = requests.get(f"{base_url}/models", headers={"Authorization": f"Bearer {key}"})
54
- response.raise_for_status()
55
- if ('data' in response.json()):
56
- output.append(key)
57
- except Exception as e:
58
- print((F"API key {key} is not valid or an actuall error happend {e}"))
59
- if len(output)==1:
60
- raise RuntimeError("No API key is working")
61
- api_key = ",".join(output)
62
- else:
63
  try:
64
- response = requests.get(f"{base_url}/models", headers={"Authorization": f"Bearer {api_key}"})
 
 
 
65
  response.raise_for_status()
66
- if not ('data' in response.json()):
67
- raise RuntimeError("Current API key is not valid")
68
- except Exception as e:
69
- raise RuntimeError(f"Current API key is not valid or an actual error happened: {e}")
70
-
71
- def encodeChat(messages):
72
- output = []
73
- for message in messages:
74
- role = message['role']
75
- name = f" [{message['name']}]" if 'name' in message else ''
76
- content = message['content']
77
- formatted_message = f"<|im_start|>{role}{name}\n{content}<|end_of_text|>"
78
- output.append(formatted_message)
79
- return "\n".join(output)
80
-
81
- def get_api_key(call='api_key'):
82
- if call == 'api_key':
83
- key = api_key
84
- elif call == 'oai_api_key':
85
- key = oai_api_key
86
- else:
87
- key = api_key
88
 
89
- if ',' in key:
90
- return random.choice(key.split(','))
91
- return key
 
92
 
93
- def moderate(messages):
94
  try:
95
  response = requests.post(
96
- f"{base_url}/moderations",
97
  headers={
98
  "Content-Type": "application/json",
99
- "Authorization": f"Bearer {get_api_key(call='api_key')}"
100
  },
101
- json={"input": encodeChat(messages)}
102
  )
103
  response.raise_for_status()
104
  moderation_result = response.json()
105
- except requests.exceptions.RequestException as e:
106
- print(f"Error during moderation request to {base_url}: {e}")
 
107
  try:
108
  response = requests.post(
109
  "https://api.openai.com/v1/moderations",
110
  headers={
111
  "Content-Type": "application/json",
112
- "Authorization": f"Bearer {get_api_key(call='oai_api_key')}"
113
  },
114
- json={"input": encodeChat(messages)}
115
  )
116
  response.raise_for_status()
117
  moderation_result = response.json()
118
- except requests.exceptions.RequestException as e:
119
- print(f"Error during moderation request to fallback URL: {e}")
120
- return False
 
121
 
122
  try:
123
- if any(result["flagged"] for result in moderation_result["results"]):
 
 
 
 
 
124
  return moderation_result
125
- except KeyError:
126
- if moderation_result["flagged"]:
127
- return moderation_result
128
-
129
- return False
130
 
131
- async def streamChat(params):
132
- async with aiohttp.ClientSession() as session:
 
133
  try:
134
- async with session.post(f"{base_url}/chat/completions", headers={"Authorization": f"Bearer {get_api_key(call='api_key')}", "Content-Type": "application/json"}, json=params) as r:
135
- r.raise_for_status()
136
- async for line in r.content:
 
 
 
 
 
 
 
137
  if line:
138
- line_str = line.decode('utf-8')
139
  if line_str.startswith("data: "):
140
- line_str = line_str[6:].strip()
141
  if line_str == "[DONE]":
142
- continue
143
  try:
144
  message = json.loads(line_str)
145
  yield message
146
  except json.JSONDecodeError:
 
147
  continue
148
- except aiohttp.ClientError:
149
- try:
150
- async with session.post("https://api.openai.com/v1/chat/completions", headers={"Authorization": f"Bearer {get_api_key(call='oai_api_key')}", "Content-Type": "application/json"}, json=params) as r:
151
- r.raise_for_status()
152
- async for line in r.content:
153
- if line:
154
- line_str = line.decode('utf-8')
155
- if line_str.startswith("data: "):
156
- line_str = line_str[6:].strip()
157
- if line_str == "[DONE]":
158
- continue
159
- try:
160
- message = json.loads(line_str)
161
- yield message
162
- except json.JSONDecodeError:
163
- continue
164
- except aiohttp.ClientError:
165
  return
166
 
167
- def rnd(length=8):
168
- letters = string.ascii_letters + string.digits
169
- return ''.join(random.choice(letters) for i in range(length))
170
-
171
 
172
  async def respond(
173
- message,
174
- history: list[tuple[str, str]],
175
- model_name,
176
- max_tokens,
177
- temperature,
178
- top_p,
179
- ):
180
- messages = [];
181
-
182
- for val in history:
183
- if val[0]:
184
- messages.append({"role": "user", "content": val[0]})
185
- if val[1]:
186
- messages.append({"role": "assistant", "content": val[1]})
187
-
188
- messages.append({"role": "user", "content": message})
189
 
190
  if message:
191
- mode = moderate(messages)
192
- if mode:
 
193
  reasons = []
194
- categories = mode[0].get('categories', {}) if isinstance(mode, list) else mode.get('categories', {})
195
  for category, flagged in categories.items():
196
  if flagged:
197
  reasons.append(category)
198
  if reasons:
199
- yield "[MODERATION] I'm sorry, but I can't assist with that.\n\nReasons:\n```\n" + "\n".join([f"{i+1}. {reason}" for i, reason in enumerate(reasons)]) + "\n```"
200
  else:
201
- yield "[MODERATION] I'm sorry, but I can't assist with that."
 
 
202
  return
203
-
204
- response = ""
205
- async for token in streamChat({
206
  "model": model_name,
207
  "messages": messages,
208
  "max_tokens": max_tokens,
@@ -210,59 +240,80 @@ async def respond(
210
  "top_p": top_p,
211
  "user": rnd(),
212
  "stream": True
213
- }):
214
- response += token['choices'][0]['delta'].get("content", token['choices'][0]['delta'].get("refusal", ""))
215
- yield response
216
-
217
 
218
- handleApiKeys();loadModels();checkModels();
219
- demo = gr.ChatInterface(
220
- respond,
221
- title="gpt-4o-mini",
222
- description="The chat is back online for not-so-long-time.",
223
- additional_inputs=[
224
- gr.Dropdown(choices=models, value="gpt-4o-mini", label="Model"),
225
- gr.Slider(minimum=1, maximum=4096, value=4096, step=1, label="Max new tokens"),
226
- gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature"),
227
- gr.Slider(minimum=0.05, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
228
- ],
229
- css="footer{display:none !important}",
230
- head="""<script>if(!confirm("By using our application, which integrates with OpenAI's API, you acknowledge and agree to the following terms regarding the data you provide:\\n\\n1. Data Collection: This application may log the following data through the Gradio endpoint or the API endpoint: message requests (including messages, responses, model settings, and images sent along with the messages), images that were generated (including only the prompt and the image), search tool calls (including query, search results, summaries, and output responses), and moderation checks (including input and output).\\n2. Data Retention and Removal: Data is retained until further notice or until a specific request for removal is made.\\n3. Data Usage: The collected data may be used for various purposes, including but not limited to, administrative review of logs, AI training, and publication as a dataset.\\n4. Privacy: Please avoid sharing any personal information.\\n\\nBy continuing to use our application, you explicitly consent to the collection, use, and potential sharing of your data as described above. If you disagree with our data collection, usage, and sharing practices, we advise you not to use our application."))location.href="/declined";</script>"""
231
- )
 
 
 
 
 
 
 
 
 
 
232
 
233
- app = FastAPI()
 
234
 
235
- @app.get("/declined")
236
- def test():
237
- return HTMLResponse(content="""
238
- <html>
239
- <head>
240
- <title>Declined</title>
241
- </head>
242
- <body>
243
- <p>Ok, you can go back to Hugging Face. I just didn't have any idea how to handle decline so you are redirected here.</p><br/>
244
- <a href="/">Go back</button>
245
- </body>
246
- </html>
247
- """)
248
 
249
- app = gr.mount_gradio_app(app, demo, path="/")
 
 
250
 
251
  class ArgParser(argparse.ArgumentParser):
252
  def __init__(self, *args, **kwargs):
253
- super(ArgParser, self).__init__(*args, **kwargs)
254
-
255
- self.add_argument("-s", "--server", type=str, default="0.0.0.0")
256
- self.add_argument("-p", "--port", type=int, default=7860)
257
- self.add_argument("-d", "--dev", default=False, action="store_true")
258
-
259
  self.args = self.parse_args(sys.argv[1:])
260
 
261
- if __name__ == "__main__":
 
 
 
 
 
 
 
 
 
262
  args = ArgParser().args
263
- if args.dev:
264
- uvicorn.run("__main__:app", host=args.server, port=args.port, reload=True)
265
- else:
266
- uvicorn.run("__main__:app", host=args.server, port=args.port, reload=False)
267
 
 
 
 
 
 
 
268
 
 
 
 
1
+ import os
2
+ import sys
3
+ import json
4
+ import math
5
+ import string
6
+ import random
7
  import argparse
8
+ import logging
9
+ from typing import List, Tuple, Optional, AsyncGenerator
10
+
11
  import aiohttp
12
  import uvicorn
13
+ import requests
14
+ import gradio as gr
15
+ from fastapi import FastAPI, Request
16
+ from starlette.responses import HTMLResponse
17
+
18
+ # Configure logging
19
+ logging.basicConfig(
20
+ level=logging.INFO,
21
+ format='%(asctime)s [%(levelname)s] %(message)s',
22
+ handlers=[
23
+ logging.StreamHandler(sys.stdout)
24
+ ]
25
+ )
26
+ logger = logging.getLogger(__name__)
27
+
28
+ # Environment Variables
29
+ API_BASE = os.getenv("API_BASE", "env")
30
+ API_KEY = os.getenv("API_KEY")
31
+ OAI_API_KEY = os.getenv("OPENAI_API_KEY")
32
+ BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
33
+ DEF_MODELS = [
34
+ "chatgpt-4o-latest", "gpt-4-0125-preview", "gpt-4-0613", "gpt-4-1106-preview",
35
+ "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4",
36
+ "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20",
37
+ "gpt-4o-mini-2024-07-18", "gpt-4o-mini", "gpt-4o"
38
+ ]
39
+
40
+ models = []
41
+ model_list = {}
42
+
43
+ # Exception for API Key handling
44
class APIKeyError(Exception):
    """Raised when no usable API key can be validated at startup."""
46
+
47
def get_api_key(call: str = 'api_key') -> str:
    """Return an API key for the requested credential slot.

    Args:
        call: ``'api_key'`` for the primary key, ``'oai_api_key'`` for the
            OpenAI fallback key; any other value falls back to the primary.

    Returns:
        A single key. When the configured value is a comma-separated list,
        one entry is chosen at random (simple load spreading across keys).

    Raises:
        APIKeyError: if the selected credential was never configured.
    """
    key = API_KEY if call == 'api_key' else (OAI_API_KEY if call == 'oai_api_key' else API_KEY)
    if key is None:
        # os.getenv() yields None for unset variables; fail with a clear
        # error instead of an opaque TypeError on the ',' membership test.
        raise APIKeyError(f"API key for '{call}' is not configured.")
    if ',' in key:
        keys = key.split(',')
        selected_key = random.choice(keys)
        # Do not log secret material; record only how many keys exist.
        logger.debug("Selected 1 of %d configured API keys.", len(keys))
        return selected_key
    return key
54
 
55
def encode_chat(messages: List[dict]) -> str:
    """Serialize chat messages into the ChatML-style text sent to moderation.

    Each message becomes ``<|im_start|>{role}[ name]\\n{content}<|end_of_text|>``
    and messages are joined with newlines.

    Args:
        messages: dicts with ``'role'`` and ``'content'`` keys and an
            optional ``'name'`` key.

    Returns:
        The concatenated encoding of all messages ("" for an empty list).
    """
    parts = []
    for msg in messages:
        # Re-using the same quote character inside an f-string expression
        # (f"...{msg['name']}...") is a SyntaxError before Python 3.12,
        # so build the optional name tag separately.
        name_tag = " [{}]".format(msg['name']) if 'name' in msg else ''
        parts.append(f"<|im_start|>{msg['role']}{name_tag}\n{msg['content']}<|end_of_text|>")
    encoded = "\n".join(parts)
    # Log only the size, not the content (the chat may contain user data).
    logging.getLogger(__name__).debug("Encoded chat of %d message(s).", len(messages))
    return encoded
62
 
63
def check_models():
    """Probe the configured API base and fall back to OpenAI if unusable.

    When API_BASE is "env", issues a GET {BASE_URL}/models with a key from
    get_api_key(); if the request fails or the payload lacks a 'data' key,
    the globals BASE_URL and API_KEY are reset to the public OpenAI
    endpoint and OAI_API_KEY.  Any other API_BASE value skips the probe
    and uses the OpenAI defaults directly.
    """
    global BASE_URL, API_KEY
    if API_BASE == "env":
        try:
            response = requests.get(
                f"{BASE_URL}/models",
                headers={"Authorization": f"Bearer {get_api_key()}"}
            )
            response.raise_for_status()
            data = response.json()
            if 'data' not in data:
                # An OpenAI-compatible /models listing carries a 'data'
                # array; its absence means the endpoint is not compatible.
                logger.warning("No 'data' in response. Falling back to default BASE_URL and API_KEY.")
                BASE_URL = "https://api.openai.com/v1"
                API_KEY = OAI_API_KEY
            else:
                logger.info("Successfully fetched models from API_BASE.")
        except requests.RequestException as e:
            # Network/HTTP failure: same fallback as an incompatible payload.
            logger.error(f"Error testing API endpoint: {e}. Falling back to default BASE_URL and API_KEY.")
            BASE_URL = "https://api.openai.com/v1"
            API_KEY = OAI_API_KEY
    else:
        BASE_URL = "https://api.openai.com/v1"
        API_KEY = OAI_API_KEY
        logger.info("Using default BASE_URL and OAI_API_KEY.")
87
 
88
def load_models():
    """Populate the global model catalogue from the static default list.

    Sorts DEF_MODELS into `models` and builds `model_list`, an
    OpenAI-style {"object": "list", "data": [...]} listing.
    """
    global models, model_list
    models = sorted(DEF_MODELS)
    entries = [
        {"id": model_id, "object": "model", "created": 0, "owned_by": "system"}
        for model_id in models
    ]
    model_list = {"object": "list", "data": entries}
    logger.info(f"Loaded models: {models}")
96
 
97
def handle_api_keys():
    """Validate each configured API key against {BASE_URL}/models.

    API_KEY may hold one key or a comma-separated list.  Every key is
    probed with a GET /models request; keys whose response lacks a 'data'
    array, or whose request errors, are dropped.  The surviving keys are
    written back to the global API_KEY as a comma-joined string.

    Raises:
        APIKeyError: if no key passes validation.
    """
    global API_KEY
    valid_keys = []
    keys = API_KEY.split(',') if ',' in API_KEY else [API_KEY]
    for key in keys:
        try:
            response = requests.get(
                f"{BASE_URL}/models",
                headers={"Authorization": f"Bearer {key.strip()}"}
            )
            response.raise_for_status()
            if 'data' in response.json():
                valid_keys.append(key.strip())
                # NOTE(review): this logs the raw secret key; consider
                # masking it before shipping logs anywhere.
                logger.debug(f"Valid API key: {key.strip()}")
            else:
                logger.warning(f"API key {key.strip()} is invalid.")
        except requests.RequestException as e:
            logger.error(f"API key {key.strip()} is not valid or an error occurred: {e}")

    if not valid_keys:
        raise APIKeyError("No valid API keys are available.")
    API_KEY = ",".join(valid_keys)
    # NOTE(review): the joined keys are also logged here in full.
    logger.info(f"Using API keys: {API_KEY}")
120
 
121
def moderate(messages: List[dict]) -> Optional[dict]:
    """Run the conversation through the moderation endpoint.

    Tries the configured BASE_URL first and falls back to the public
    OpenAI endpoint on any request error.

    Args:
        messages: chat messages to check (serialized via encode_chat()).

    Returns:
        The raw moderation response when any result is flagged,
        otherwise None (also None when both requests fail).
    """
    try:
        response = requests.post(
            f"{BASE_URL}/moderations",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {get_api_key('api_key')}"
            },
            json={"input": encode_chat(messages)}
        )
        response.raise_for_status()
        moderation_result = response.json()
        logger.debug(f"Moderation result: {moderation_result}")
    except requests.RequestException as e:
        logger.error(f"Moderation request failed: {e}. Trying fallback URL.")
        try:
            response = requests.post(
                "https://api.openai.com/v1/moderations",
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {get_api_key('oai_api_key')}"
                },
                json={"input": encode_chat(messages)}
            )
            response.raise_for_status()
            moderation_result = response.json()
            logger.debug(f"Moderation result from fallback: {moderation_result}")
        except requests.RequestException as ex:
            logger.error(f"Fallback moderation request failed: {ex}")
            return None

    # BUG FIX: the moderation API responds with
    # {"id": ..., "model": ..., "results": [{"flagged": ..., ...}]},
    # so the verdicts live under "results", not at the top level.
    # Checking moderation_result.get("flagged") at the top level was
    # always False, which silently disabled moderation entirely.
    try:
        if isinstance(moderation_result, list):
            results = moderation_result
        else:
            results = moderation_result.get("results", [moderation_result])
        if any(r.get("flagged", False) for r in results):
            logger.info("Content flagged by moderation.")
            return moderation_result
    except (AttributeError, TypeError) as e:
        logger.error(f"Unexpected moderation response shape: {e}")
        return None

    return None
165
 
166
async def stream_chat(params: dict):
    """Stream chat-completion chunks, with automatic endpoint fallback.

    Posts to BASE_URL first; on a client error retries once against the
    public OpenAI endpoint.  Each SSE ``data: `` line is decoded from JSON
    and yielded as a dict; the ``[DONE]`` sentinel ends the stream.

    Args:
        params: JSON body for the /chat/completions request.

    Yields:
        Parsed streaming chunks (dicts) from the upstream API.
    """
    # BUG FIX: each endpoint must be paired with the credential slot that
    # is valid for it -- the fallback endpoint previously reused
    # get_api_key('api_key'), i.e. the primary (possibly non-OpenAI) key,
    # which made the fallback request fail authentication.
    endpoints = [
        (f"{BASE_URL}/chat/completions", 'api_key'),
        ("https://api.openai.com/v1/chat/completions", 'oai_api_key'),
    ]
    async with aiohttp.ClientSession() as session:
        for attempt, (url, key_slot) in enumerate(endpoints, start=1):
            try:
                async with session.post(
                    url,
                    headers={
                        "Authorization": f"Bearer {get_api_key(key_slot)}",
                        "Content-Type": "application/json"
                    },
                    json=params
                ) as resp:
                    resp.raise_for_status()
                    async for line in resp.content:
                        if line:
                            line_str = line.decode('utf-8').strip()
                            if line_str.startswith("data: "):
                                line_str = line_str[6:]
                                if line_str == "[DONE]":
                                    break
                                try:
                                    yield json.loads(line_str)
                                except json.JSONDecodeError:
                                    logger.warning("Failed to decode JSON from line.")
                                    continue
                    break  # Successful request, exit the retry loop
            except aiohttp.ClientError as e:
                logger.error(f"Stream chat request failed on attempt {attempt}: {e}")
                if attempt == len(endpoints):
                    return
197
 
198
def rnd(length: int = 8) -> str:
    """Build a random alphanumeric identifier of the given length."""
    alphabet = string.ascii_letters + string.digits
    token = ''.join(random.choices(alphabet, k=length))
    logging.getLogger(__name__).debug(f"Generated random string: {token}")
    return token
202
 
203
async def respond(
    message: str,
    history: List[Tuple[str, str]],
    model_name: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
) -> AsyncGenerator[str, None]:
    """Gradio chat handler: moderate the conversation, then stream a reply.

    Args:
        message: the newest user message.
        history: prior (user, assistant) turns from the Gradio chat UI.
        model_name: model id to request.
        max_tokens: generation token cap.
        temperature: sampling temperature.
        top_p: nucleus-sampling probability mass.

    Yields:
        The progressively-growing assistant response text (or a single
        moderation-refusal message when the input is flagged).
    """
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    if message:
        messages.append({"role": "user", "content": message})
        moderation = moderate(messages)
        if moderation:
            # BUG FIX: the moderation API nests per-input verdicts under
            # "results"; reading 'categories' from the top-level response
            # always produced {}, so flag reasons were never shown.  Read
            # from results[0], keeping the old top-level lookup as a
            # fallback for alternate response shapes.
            first = moderation[0] if isinstance(moderation, list) and moderation else moderation
            categories = {}
            if isinstance(first, dict):
                results = first.get("results")
                if results:
                    categories = results[0].get("categories", {}) or {}
                else:
                    categories = first.get("categories", {}) or {}
            reasons = [category for category, flagged in categories.items() if flagged]
            if reasons:
                response = "[MODERATION] I'm sorry, but I can't assist with that.\n\nReasons:\n```\n" + "\n".join(f"{i+1}. {reason}" for i, reason in enumerate(reasons)) + "\n```"
            else:
                response = "[MODERATION] I'm sorry, but I can't assist with that."
            logger.info("Message flagged by moderation.")
            yield response
            return

    params = {
        "model": model_name,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "user": rnd(),
        "stream": True
    }

    response_text = ""
    async for token in stream_chat(params):
        if token and 'choices' in token:
            delta = token['choices'][0].get('delta', {})
            # The API may send an explicit null for "content"; coalesce to ""
            # so string concatenation cannot fail mid-stream.
            content = delta.get("content") or delta.get("refusal") or ""
            response_text += content
            yield response_text
+
253
def create_gradio_interface() -> gr.ChatInterface:
    """Build the Gradio ChatInterface wired to respond().

    The model dropdown is populated from the global `models` list (filled
    by load_models()), and the page <head> script shows a data-collection
    consent dialog, redirecting decliners to /declined.
    """
    return gr.ChatInterface(
        respond,
        title="gpt-4o-mini",
        description="The chat is back online for a not-so-long time.",
        additional_inputs=[
            gr.Dropdown(choices=models, value="gpt-4o-mini", label="Model"),
            gr.Slider(minimum=1, maximum=4096, value=4096, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature"),
            gr.Slider(minimum=0.05, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        ],
        css="footer{display:none !important}",
        head="""<script>
    if(!confirm("By using our application, which integrates with OpenAI's API, you acknowledge and agree to the following terms regarding the data you provide:\\n\\n1. Data Collection: This application may log the following data through the Gradio endpoint or the API endpoint: message requests (including messages, responses, model settings, and images sent along with the messages), images that were generated (including only the prompt and the image), search tool calls (including query, search results, summaries, and output responses), and moderation checks (including input and output).\\n2. Data Retention and Removal: Data is retained until further notice or until a specific request for removal is made.\\n3. Data Usage: The collected data may be used for various purposes, including but not limited to, administrative review of logs, AI training, and publication as a dataset.\\n4. Privacy: Please avoid sharing any personal information.\\n\\nBy continuing to use our application, you explicitly consent to the collection, use, and potential sharing of your data as described above. If you disagree with our data collection, usage, and sharing practices, we advise you not to use our application.")) location.href="/declined";
    </script>"""
    )
269
 
270
def create_fastapi_app() -> FastAPI:
    """Assemble the FastAPI application: a /declined landing page plus the
    Gradio chat UI mounted at the root path."""
    application = FastAPI()

    @application.get("/declined")
    def declined():
        # Landing page for users who reject the consent dialog.
        return HTMLResponse(content="""
        <html>
        <head>
            <title>Declined</title>
        </head>
        <body>
            <p>Ok, you can go back to Hugging Face. I just didn't have any idea how to handle decline so you are redirected here.</p><br/>
            <a href="/">Go back</a>
        </body>
        </html>
        """)

    return gr.mount_gradio_app(application, create_gradio_interface(), path="/")
290
 
291
class ArgParser(argparse.ArgumentParser):
    """Command-line options for the server: host, port, and dev reload."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Declare all options in one table, then register them uniformly.
        option_table = (
            (("-s", "--server"), dict(type=str, default="0.0.0.0", help="Server host.")),
            (("-p", "--port"), dict(type=int, default=7860, help="Server port.")),
            (("-d", "--dev"), dict(action="store_true", help="Run in development mode.")),
        )
        for flags, options in option_table:
            self.add_argument(*flags, **options)
        self.args = self.parse_args(sys.argv[1:])
298
 
299
def main():
    """Validate credentials, build the application, and launch the server.

    Exits with status 1 when no API key passes validation.
    """
    try:
        handle_api_keys()
        load_models()
        check_models()
    except APIKeyError as e:
        logger.critical(e)
        sys.exit(1)

    app = create_fastapi_app()
    args = ArgParser().args

    # BUG FIX: the previous call passed the import string "main:app", but
    # `app` is a local variable of this function (and the module is
    # app.py), so uvicorn could never import it and startup failed.
    # Pass the application object directly instead.
    # NOTE(review): uvicorn ignores reload=True for app objects (reload
    # needs an import string pointing at a module-level app); --dev will
    # only emit a warning until the app is exposed at module level.
    uvicorn.run(
        app,
        host=args.server,
        port=args.port,
        reload=args.dev
    )

if __name__ == "__main__":
    main()