woolbot committed on
Commit
9b1524f
1 Parent(s): 11ba6b5
app.py CHANGED
@@ -8,7 +8,7 @@ from routes import *
8
 
9
  #initing
10
  app = Flask(__name__)
11
- VERSION = '1.0 build82'
12
  app.config['JSON_AS_ASCII'] = False
13
  limiter = Limiter(app=app, key_func=get_remote_address, default_limits=["5/minute"], storage_uri="memory://",)
14
 
@@ -85,6 +85,11 @@ def getBMPreview(): return osuApi.getPreview(request)
85
  @app.route('/osu/api/v1/get-full', methods=['GET', 'POST'])
86
  def getBMFull(): return osuApi.getFull(request)
87
 
 
 
 
 
 
88
  if __name__ == "__main__":
89
  config = configFile()
90
  with open(config['config-path'], "w") as outfile:
 
8
 
9
  #initing
10
  app = Flask(__name__)
11
+ VERSION = '1.0 build83'
12
  app.config['JSON_AS_ASCII'] = False
13
  limiter = Limiter(app=app, key_func=get_remote_address, default_limits=["5/minute"], storage_uri="memory://",)
14
 
 
85
  @app.route('/osu/api/v1/get-full', methods=['GET', 'POST'])
86
  def getBMFull(): return osuApi.getFull(request)
87
 
88
##############
# shh — unlisted captcha-solving endpoint.
@app.route('/aminocaptcha/api/v1/solve', methods=['GET', 'POST'])
def solveAminoCaptcha():
    # Bug fix: this view was named getBMFull, the same name as the
    # /osu/api/v1/get-full view above. Flask keys endpoints by view
    # function name and raises an AssertionError at startup when two
    # routes try to register under one endpoint name.
    return aminoOSRapi.apipredict(request)
93
  if __name__ == "__main__":
94
  config = configFile()
95
  with open(config['config-path'], "w") as outfile:
requirements.txt CHANGED
@@ -1,7 +1,10 @@
 
1
  wget
2
  flask
 
3
  psutil
4
  yt_dlp
 
5
  urllib3
6
  requests
7
  py-cpuinfo
 
1
+ opencv-python
2
  wget
3
  flask
4
+ numpy
5
  psutil
6
  yt_dlp
7
+ aiohttp
8
  urllib3
9
  requests
10
  py-cpuinfo
routes/__init__.py CHANGED
@@ -2,4 +2,5 @@ from .ytApi import *
2
  from .osuApi import *
3
  from .helpers import *
4
  from .siteRoutes import *
 
5
  from .recognizeApi import *
 
2
  from .osuApi import *
3
  from .helpers import *
4
  from .siteRoutes import *
5
+ from .aminoOSRapi import *
6
  from .recognizeApi import *
routes/aminoOSRapi/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .main import apipredict
routes/aminoOSRapi/captcha_processor.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
from numpy import asarray as np_as_array
from numpy import all as np_all


class CaptchaProcessor:
    """In-place OpenCV processing pipeline for captcha images.

    Wraps a single image (``self.img``) decoded from raw bytes; every
    mutating method edits or replaces that image in place.
    """

    # Captchas are normalized so background/noise become pure white.
    WHITE_RGB = (255, 255, 255)

    def __init__(self, data: bytes):
        # Decode raw encoded bytes (PNG/JPEG/...) into an OpenCV matrix.
        self.img = cv2.imdecode(
            np_as_array(bytearray(data), dtype="uint8"),
            cv2.IMREAD_ANYCOLOR
        )

    def threshold(self):
        """Binarize the image using Otsu's automatic threshold."""
        self.img = cv2.threshold(self.img, 0, 255, cv2.THRESH_OTSU)[1]

    def convert_color_space(self, target_space: int):
        """Convert the image; *target_space* is a cv2.COLOR_* code."""
        self.img = cv2.cvtColor(self.img, target_space)

    def get_background_color(self) -> tuple:
        # Assumes the top-left pixel belongs to the background —
        # TODO confirm this holds for every captcha variant.
        return tuple(self.img[0, 0])

    def resize(self, x: int, y: int):
        self.img = cv2.resize(self.img, (x, y))

    def save(self, name: str):
        cv2.imwrite(name, self.img)

    def get_letters_color(self) -> tuple:
        """Return the most frequent non-white color (taken to be the letter color)."""
        colors = []
        for y in range(self.img.shape[1]):
            for x in range(self.img.shape[0]):
                color = tuple(self.img[x, y])
                if color != self.WHITE_RGB:
                    colors.append(color)
        return max(set(colors), key=colors.count)

    def replace_color(self, target: tuple, to: tuple):
        """Replace every pixel equal to *target* with *to*."""
        self.img[np_all(self.img == target, axis=-1)] = to

    def replace_colors(self, exception: tuple, to: tuple):
        """Replace every pixel NOT equal to *exception* with *to*."""
        self.img[np_all(self.img != exception, axis=-1)] = to

    def increase_contrast(self, alpha: float, beta: float):
        self.img = cv2.convertScaleAbs(self.img, alpha=alpha, beta=beta)

    def increase_letters_size(self, add_pixels: int):
        """Thicken letters: blacken up to *add_pixels* neighbors of each black pixel.

        Raises IndexError when a letter touches the image border; callers
        treat that as "retry with a fresh captcha".
        """
        pixels = []
        for y in range(self.img.shape[1]):
            for x in range(self.img.shape[0]):
                if self.img[x, y] == 0:
                    pixels.append((x, y))
        for y, x in pixels:
            for i in range(1, add_pixels + 1):
                # Bug fix: the original issued each of these four writes
                # twice; re-assigning 0 to the same pixel is a no-op, so
                # the duplicated block was removed.
                self.img[y + i, x] = 0
                self.img[y - i, x] = 0
                self.img[y, x + i] = 0
                self.img[y, x - i] = 0

    # Spreads the letters apart from each other.
    # Could raise accuracy a lot, but a correct implementation was never
    # found (translated from the original Russian note).
    def distance_letters(self, cf: float):
        pixels = []
        for y in range(self.img.shape[1]):
            for x in range(self.img.shape[0]):
                if self.img[x, y] == 0:
                    pixels.append((x, y))
        for y, x in pixels:
            self.img[y, x] = 255
            z = self.img.shape[1] / x
            if z >= 2:
                self.img[y, x - int((900 // x) * cf)] = 0
            else:
                self.img[y, x + int((900 // x) * cf)] = 0

    def slice_letters(self):
        """Cut the binarized image into per-letter 20x40 frames, left to right."""
        contours, hierarchy = cv2.findContours(self.img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        letter_image_regions = []
        letters = []
        for idx, contour in enumerate(contours):
            # Keep only contours whose parent is the outermost contour.
            if hierarchy[0][idx][3] != 0:
                continue
            (x, y, w, h) = cv2.boundingRect(contour)
            if w / h > 1.5:
                # A very wide region is presumably two glued letters — split it.
                half_width = int(w / 2)
                letter_image_regions.append((idx, x, y, half_width, h))
                letter_image_regions.append((idx, x + half_width, y, half_width, h))
            else:
                letter_image_regions.append((idx, x, y, w, h))
        # Sort by x so letters come out in reading order.
        letter_image_regions = sorted(letter_image_regions, key=lambda region: region[1])
        for _, x, y, w, h in letter_image_regions:
            frame = self.img[y:y + h, x:x + w]
            # Overly wide frames are noise/merged blobs — skip them.
            if frame.shape[1] > 35:
                continue
            frame = cv2.resize(frame, (20, 40))
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            letters.append(frame)
        return letters

    def show(self):
        """Debug helper: display the current image and wait for a key press."""
        cv2.imshow("Captcha Processor", self.img)
        cv2.waitKey(0)

    @classmethod
    def from_file_name(cls, name: str):
        """Alternate constructor: build a processor from an image file on disk."""
        # Bug fix: use a context manager so the handle is closed even if
        # decoding raises (the original left the file open on error).
        with open(name, "rb") as file:
            return cls(file.read())
routes/aminoOSRapi/main.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from .utils import predict  # relative import: utils lives in this package
import asyncio


def apipredict(request):
    """Solve an Amino captcha from the image URL in the request.

    Accepts the URL via form field (POST) or query arg (GET).  Returns a
    JSON-serializable dict: either the prediction payload produced by
    utils.predict or an error envelope.
    """
    try:
        if request.method == 'POST':
            url = request.form['url']
        else:
            url = request.args['url']
        # str.strip() can never return None, so an empty string is the
        # only "missing" case left after a successful key lookup.
        if not url.strip():
            raise ValueError("empty url")
    except Exception:
        return {"status": "error", "details": { "error_code": 101, "error_details": "No link provided" }}
    # Bug fix: asyncio.get_event_loop() is deprecated and raises
    # RuntimeError in Flask worker threads (no current event loop there).
    # asyncio.run() creates and tears down a fresh loop per call.
    return asyncio.run(predict(url))
routes/aminoOSRapi/model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:792c015158ffcfaadbb2a65fef9623af7fa1d243e3e1f915444f86c40049ea13
3
+ size 3730536
routes/aminoOSRapi/recognizeVoice.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import wget
import random
import string
import ffmpeg
from .. import helpers
import speech_recognition as sr


def recognizeVoice(request):
    """Download the audio file behind the request's URL and transcribe it.

    Expects `url` and a valid `signature` (form fields for POST, query
    args for GET); `lang` is optional and defaults to en-US.  Returns a
    dict in the API's {"status": ..., ...} envelope.
    """
    # --- required: audio URL ---
    try:
        if request.method == 'POST':
            url = request.form['url']
        else:
            url = request.args['url']
        if not url.strip():
            raise ValueError("empty url")
    except Exception:
        return {"status": "error", "details": { "error_code": 101, "error_details": "No link provided" }}

    # --- required: request signature ---
    try:
        if request.method == 'POST':
            signature = request.form['signature']
        else:
            signature = request.args['signature']
    except Exception:
        return {"status": "error", "details": { "error_code": 103, "error_details": "No signature" }}
    if not helpers.checkSignature(signature):
        return {"status": "error", "details": { "error_code": 105, "error_details": "Invalid signature" }}

    # --- optional: recognition language, normalized to Google's codes ---
    try:
        if request.method == 'POST':
            lang = request.form['lang']
        else:
            lang = request.args['lang']
        if lang.lower() in ['en', 'en-us']:
            lang = 'en-US'
        elif lang.lower() in ['ru', 'ru-ru']:
            lang = 'ru-RU'
    except Exception:
        lang = "en-US"

    # Random 16-char name so concurrent requests never collide on disk.
    fileId = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(16))
    # fileExt keeps its leading dot, e.g. ".mp3".
    fileExt = url[url.rfind('.'):url.rfind('.')+4]
    if fileExt not in [".wav", ".mp3", ".ogg", '.aac']:
        return {"status": "error", "details": { "error_code": 111, "error_details": "Wrong file format (only ogg, wav, mp3)" }}

    r = sr.Recognizer()

    config = helpers.configFile()

    wget.download(url, f"{config['temp-path']}/{fileId}{fileExt}")
    if fileExt != ".wav":
        # speech_recognition's AudioFile needs WAV input, so transcode first.
        audio_input = ffmpeg.input(f"{config['temp-path']}/{fileId}{fileExt}")
        oldFE = fileExt
        fileExt = ".wav"
        audio_output = ffmpeg.output(audio_input.audio, f"{config['temp-path']}/{fileId}{fileExt}")
        ffmpeg.run(audio_output)
        # Bug fix: the original deleted f"temp/{fileId}.{oldFE}" — oldFE
        # already starts with a dot and the file was written under
        # config['temp-path'], so the pre-transcode file leaked on disk.
        helpers.deleteAudio(f"{config['temp-path']}/{fileId}{oldFE}")

    rawSource = sr.AudioFile(f"{config['temp-path']}/{fileId}{fileExt}")
    with rawSource as source:
        r.adjust_for_ambient_noise(source)
        audio = r.record(source)

    try:
        googleText = r.recognize_google(audio, language=lang)
    except Exception:
        googleText = ""

    # Houndify is disabled — no API keys at the moment.
    # try: houndifyText = r.recognize_houndify(audio)
    # except Exception: houndifyText = ""

    # Bug fix: delete from config['temp-path'] (where the file was
    # written), not the hard-coded "temp/" directory.
    helpers.deleteAudio(f"{config['temp-path']}/{fileId}{fileExt}")

    return {"status": "pass", "result": {"google": googleText, "houndify": "NOT-WORKING"}}
routes/aminoOSRapi/utils.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from os import path as os_path
from asyncio import get_running_loop

from keras.models import load_model
from aiohttp import ClientSession
from numpy import expand_dims as np_expand_dims

from .captcha_processor import CaptchaProcessor  # relative: module lives in this package

# Resolve model.h5 next to this file so loading works regardless of the
# process working directory (the original relied on the CWD).
model = load_model(os_path.join(os_path.dirname(__file__), "model.h5"))

# Hard cap on re-fetch attempts.  Bug fix: the original threaded a
# `recursion` counter through but never checked it, so an unsolvable
# captcha recursed until RecursionError.
MAX_ATTEMPTS = 5


async def get_binary_from_link(link: str) -> bytes:
    """Fetch *link* and return the raw response body."""
    async with ClientSession() as session:
        return await (await session.get(link)).read()


async def predict(url: str, recursion: int = 0) -> dict:
    """Solve the captcha image at *url* with the CNN model.

    Retries (re-downloading a fresh captcha render) when preprocessing
    fails or segmentation does not yield exactly 6 letters, up to
    MAX_ATTEMPTS times; then returns an error envelope matching the
    other API handlers.
    """
    if recursion > MAX_ATTEMPTS:
        return {"status": "error", "details": { "error_code": 110, "error_details": "Could not segment captcha" }}
    binary = await get_binary_from_link(url)
    processor = CaptchaProcessor(binary)
    # Normalize: background -> white, everything but the letter color -> white.
    processor.replace_color(processor.get_background_color(), processor.WHITE_RGB)
    processor.replace_colors(processor.get_letters_color(), processor.WHITE_RGB)
    # 6 is cv2.COLOR_BGR2GRAY.
    processor.convert_color_space(6)
    processor.threshold()
    try:
        processor.increase_letters_size(2)
    except IndexError:
        # A letter touched the image border — fetch a new render and retry.
        return await predict(url, recursion + 1)
    letters = processor.slice_letters()
    if len(letters) != 6:
        return await predict(url, recursion + 1)
    # Run the six model calls in the default executor so they can overlap
    # without blocking the event loop.
    letters_solving = [
        get_running_loop().run_in_executor(None, model.predict, np_expand_dims(letter, axis=0))
        for letter in letters
    ]
    letters_solving = [await result for result in letters_solving]
    fulls = [[float(score) for score in letter[0]] for letter in letters_solving]
    # Each letter's prediction is the index of its highest class score.
    shorts = [prediction.index(max(prediction)) for prediction in fulls]
    final = "".join(str(short) for short in shorts)
    return {
        "prediction": final,
        "letters_predictions": shorts,
        "full_prediction": fulls,
        "recursion": recursion
    }