v1 of chess space
- README.md +3 -11
- app.py +57 -0
- chessfenbot/.DS_Store +0 -0
- chessfenbot/.gitignore +21 -0
- chessfenbot/Dockerfile +31 -0
- chessfenbot/LICENSE +21 -0
- chessfenbot/__init__ +0 -0
- chessfenbot/cfb_helpers.py +22 -0
- chessfenbot/chessboard_finder.py +426 -0
- chessfenbot/chessbot.py +165 -0
- chessfenbot/dataset.py +61 -0
- chessfenbot/example_input.png +0 -0
- chessfenbot/helper_functions.py +172 -0
- chessfenbot/helper_functions_chessbot.py +163 -0
- chessfenbot/helper_image_loading.py +109 -0
- chessfenbot/helper_webkit2png.py +76 -0
- chessfenbot/message_template.py +38 -0
- chessfenbot/readme.md +127 -0
- chessfenbot/requirements.txt +7 -0
- chessfenbot/run_chessbot.sh +4 -0
- chessfenbot/save_graph.py +111 -0
- chessfenbot/saved_models/.DS_Store +0 -0
- chessfenbot/saved_models/cf_v1.0.tflite +0 -0
- chessfenbot/saved_models/checkpoint +2 -0
- chessfenbot/saved_models/frozen_graph.pb +3 -0
- chessfenbot/saved_models/graph.pb +3 -0
- chessfenbot/saved_models/graph.pbtxt +0 -0
- chessfenbot/saved_models/model_10000.ckpt +3 -0
- chessfenbot/tensorflow_chessbot.py +212 -0
- chessfenbot/tileset_generator.py +97 -0
- chessfenbot/webkit2png.py +414 -0
- requirements.txt +4 -0
README.md
CHANGED
@@ -1,12 +1,4 @@
- ---
-
-
- colorFrom: blue
- colorTo: gray
- sdk: gradio
- sdk_version: 3.0.19
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Working on setting up a HuggingFace space for chess, using Gradio
+
+ Step 1: deploy a model to predict the position from an image (using https://github.com/Elucidation/tensorflow_chessbot)
+
app.py
ADDED
@@ -0,0 +1,57 @@
import re
import gradio as gr

from chessfenbot.chessboard_finder import findGrayscaleTilesInImage
from chessfenbot.tensorflow_chessbot import ChessboardPredictor
from chessfenbot.helper_functions import shortenFEN


def predict(img, active="w"):
    """
    Main predict function for Gradio.
    Predicts a chessboard FEN.
    Wraps model from https://github.com/Elucidation/tensorflow_chessbot/tree/chessfenbot

    Args:
        img (PIL image): input image of a chess board
        active (str): side to play, defaults to "w"
    """

    # Look for chessboard in image, get corners and split chessboard into tiles
    tiles, corners = findGrayscaleTilesInImage(img)

    # Initialize predictor, takes a while, but only needed once
    predictor = ChessboardPredictor(frozen_graph_path='chessfenbot/saved_models/frozen_graph.pb')
    fen, tile_certainties = predictor.getPrediction(tiles)
    predictor.close()
    short_fen = shortenFEN(fen)
    # Use the worst-case certainty as our final uncertainty score
    certainty = tile_certainties.min()

    print('Per-tile certainty:')
    print(tile_certainties)
    print("Certainty range [%g - %g], Avg: %g" % (
        tile_certainties.min(), tile_certainties.max(), tile_certainties.mean()))

    # Predicted FEN
    fen_out = f"{short_fen} {active} - - 0 1"
    # Certainty
    certainty = "%.1f%%" % (certainty*100)
    # Link to analysis board on Lichess
    lichess_link = f'https://lichess.org/analysis/standard/{re.sub(" ", "_", fen_out)}'

    return fen_out, certainty, lichess_link


gr.Interface(
    predict,
    inputs=gr.inputs.Image(label="Upload chess board", type="pil"),
    outputs=[
        gr.Textbox(label="FEN"),
        gr.Textbox(label="certainty"),
        gr.Textbox(label="Link to Lichess analysis board (copy and paste into URL)"),
    ],
    title="Chess FEN bot",
    examples=["chessfenbot/example_input.png"],
    description="Simple wrapper around TensorFlow Chessbot (https://github.com/Elucidation/tensorflow_chessbot)"
).launch()
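To sanity-check the pipeline outside Gradio (a minimal sketch, not part of the commit; it mirrors the calls in app.py above and assumes the bundled example image and frozen graph):

# Sketch: run the app.py prediction pipeline directly on the bundled example image.
import PIL.Image
from chessfenbot.chessboard_finder import findGrayscaleTilesInImage
from chessfenbot.tensorflow_chessbot import ChessboardPredictor
from chessfenbot.helper_functions import shortenFEN

img = PIL.Image.open("chessfenbot/example_input.png")
tiles, corners = findGrayscaleTilesInImage(img)

predictor = ChessboardPredictor(frozen_graph_path='chessfenbot/saved_models/frozen_graph.pb')
fen, tile_certainties = predictor.getPrediction(tiles)
predictor.close()

print(shortenFEN(fen), "worst tile certainty: %.1f%%" % (tile_certainties.min() * 100))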
chessfenbot/.DS_Store
ADDED
Binary file (8.2 kB)
chessfenbot/.gitignore
ADDED
@@ -0,0 +1,21 @@
.ipynb_checkpoints
*.pyc
*.png
*.jpg
*.gif

!example_input.png

# Ignore chessboard input images and tile outputs
chessboards/
tiles/

# Ignore reddit username/password config file
auth_config.py
praw.ini

# Ignore tracking files
*.txt
!requirements.txt

!readme_images/*
chessfenbot/Dockerfile
ADDED
@@ -0,0 +1,31 @@
FROM tensorflow/tensorflow
MAINTAINER Sam <elucidation@gmail.com>

# Install python and pip and use pip to install the python reddit api PRAW
RUN apt-get -y update && apt-get install -y \
    python-dev \
    libxml2-dev \
    libxslt1-dev \
    libjpeg-dev \
    vim \
    && apt-get clean

# Install python reddit api related files
RUN pip install praw==4.3.0 beautifulsoup4==4.4.1 lxml==3.3.3 Pillow==4.0.0 html5lib==1.0b8

# Clean up APT when done.
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Remove jupyter related files
RUN rm -rf /notebooks /run_jupyter.sh

# Copy code over
COPY . /tcb/

WORKDIR /tcb

# Run chessbot by default
CMD ["/tcb/run_chessbot.sh"]

# Start up the docker instance with the proper auth file using
# <machine>$ docker run -dt --rm --name cfb -v <local_auth_file>:/tcb/auth_config.py elucidation/tensorflow_chessbot
chessfenbot/LICENSE
ADDED
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 Sameer Ansari

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
chessfenbot/__init__
ADDED
File without changes
chessfenbot/cfb_helpers.py
ADDED
@@ -0,0 +1,22 @@
import time
from datetime import datetime

# Check if submission has a comment by this bot already
def previouslyRepliedTo(submission, me):
    for comment in submission.comments:
        if comment.author == me:
            return True
    return False


def waitWithComments(sleep_time, segment=60):
    """Sleep for sleep_time seconds, printing to stdout every segment of time"""
    print("\t%s - %s seconds to go..." % (datetime.now(), sleep_time))
    while sleep_time > segment:
        time.sleep(segment)  # sleep in increments of segment seconds
        sleep_time -= segment
        print("\t%s - %s seconds to go..." % (datetime.now(), sleep_time))
    time.sleep(sleep_time)

def logMessage(submission, status=""):
    print("{} | {} {}: {}".format(datetime.now(), submission.id, status, submission.title.encode('utf-8')))
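As a quick illustration (a sketch, not part of the commit): waitWithComments splits a long sleep into segment-sized chunks and reports progress between them.

# Sketch: progress printing during a 5-second wait, in 2-second segments.
from chessfenbot.cfb_helpers import waitWithComments

waitWithComments(5, segment=2)
# 	2022-06-26 12:00:00.000000 - 5 seconds to go...   (timestamps illustrative)
# 	2022-06-26 12:00:02.000000 - 3 seconds to go...
# 	2022-06-26 12:00:04.000000 - 1 seconds to go...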
chessfenbot/chessboard_finder.py
ADDED
@@ -0,0 +1,426 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Pass in image of online chessboard screenshot, returns corners of chessboard
# usage: chessboard_finder.py [-h] urls [urls ...]

# Find orthorectified chessboard corners in image

# positional arguments:
#   urls        Input image urls

# optional arguments:
#   -h, --help  show this help message and exit


# sudo apt-get install libatlas-base-dev for numpy error, see https://github.com/Kitt-AI/snowboy/issues/262
import numpy as np
# sudo apt-get install libopenjp2-7 libtiff5
import PIL.Image
import argparse
from time import time
from .helper_image_loading import *


def nonmax_suppress_1d(arr, winsize=5):
    """Return 1d array with only peaks, use neighborhood window of winsize px"""
    _arr = arr.copy()

    for i in range(_arr.size):
        if i == 0:
            left_neighborhood = 0
        else:
            left_neighborhood = arr[max(0, i-winsize):i]
        if i >= _arr.size-2:
            right_neighborhood = 0
        else:
            right_neighborhood = arr[i+1:min(arr.size-1, i+winsize)]

        if arr[i] < np.max(left_neighborhood) or arr[i] <= np.max(right_neighborhood):
            _arr[i] = 0
    return _arr

def findChessboardCorners(img_arr_gray, noise_threshold=8000):
    # Load image grayscale as a numpy array
    # Return None on failure to find a chessboard
    #
    # noise_threshold: Ratio of standard deviation of hough values along an axis
    # versus the number of pixels, manually measured bad trigger images
    # at < 5,000 and good chessboards values at > 10,000

    # Get gradients, split into positive and inverted negative components
    gx, gy = np.gradient(img_arr_gray)
    gx_pos = gx.copy()
    gx_pos[gx_pos < 0] = 0
    gx_neg = -gx.copy()
    gx_neg[gx_neg < 0] = 0

    gy_pos = gy.copy()
    gy_pos[gy_pos < 0] = 0
    gy_neg = -gy.copy()
    gy_neg[gy_neg < 0] = 0

    # 1-D amplitude of hough transform of gradients about X & Y axes
    num_px = img_arr_gray.shape[0] * img_arr_gray.shape[1]
    hough_gx = gx_pos.sum(axis=1) * gx_neg.sum(axis=1)
    hough_gy = gy_pos.sum(axis=0) * gy_neg.sum(axis=0)

    # Check that gradient peak signal is strong enough by
    # comparing normalized standard deviation to threshold
    if min(hough_gx.std() / hough_gx.size,
           hough_gy.std() / hough_gy.size) < noise_threshold:
        return None

    # Normalize and skeletonize to just local peaks
    hough_gx = nonmax_suppress_1d(hough_gx) / hough_gx.max()
    hough_gy = nonmax_suppress_1d(hough_gy) / hough_gy.max()

    # Arbitrary threshold of 20% of max
    hough_gx[hough_gx < 0.2] = 0
    hough_gy[hough_gy < 0.2] = 0

    # Now we have a set of potential vertical and horizontal lines that
    # may contain some noisy readings, try different subsets of them with
    # consistent spacing until we get a set of 7, choose strongest set of 7
    pot_lines_x = np.where(hough_gx)[0]
    pot_lines_y = np.where(hough_gy)[0]
    pot_lines_x_vals = hough_gx[pot_lines_x]
    pot_lines_y_vals = hough_gy[pot_lines_y]

    # Get all possible length 7+ sequences
    seqs_x = getAllSequences(pot_lines_x)
    seqs_y = getAllSequences(pot_lines_y)

    if len(seqs_x) == 0 or len(seqs_y) == 0:
        return None

    # Score sequences by the strength of their hough peaks
    seqs_x_vals = [pot_lines_x_vals[[v in seq for v in pot_lines_x]] for seq in seqs_x]
    seqs_y_vals = [pot_lines_y_vals[[v in seq for v in pot_lines_y]] for seq in seqs_y]

    # Shorten sequences to up to 9 values based on score
    # X sequences
    for i in range(len(seqs_x)):
        seq = seqs_x[i]
        seq_val = seqs_x_vals[i]

        # if the length of sequence is more than 7 + edges = 9
        # strip weakest edges
        if len(seq) > 9:
            # while not inner 7 chess lines, strip weakest edges
            while len(seq) > 7:
                if seq_val[0] > seq_val[-1]:
                    seq = seq[:-1]
                    seq_val = seq_val[:-1]
                else:
                    seq = seq[1:]
                    seq_val = seq_val[1:]

        seqs_x[i] = seq
        seqs_x_vals[i] = seq_val

    # Y sequences
    for i in range(len(seqs_y)):
        seq = seqs_y[i]
        seq_val = seqs_y_vals[i]

        while len(seq) > 9:
            if seq_val[0] > seq_val[-1]:
                seq = seq[:-1]
                seq_val = seq_val[:-1]
            else:
                seq = seq[1:]
                seq_val = seq_val[1:]

        seqs_y[i] = seq
        seqs_y_vals[i] = seq_val

    # Now that we only have length 7-9 sequences, score and choose the best one
    scores_x = np.array([np.mean(v) for v in seqs_x_vals])
    scores_y = np.array([np.mean(v) for v in seqs_y_vals])

    # Keep first sequence with the largest step size
    # scores_x = np.array([np.median(np.diff(s)) for s in seqs_x])
    # scores_y = np.array([np.median(np.diff(s)) for s in seqs_y])

    # TODO(elucidation): Choose heuristic score between step size and hough response

    best_seq_x = seqs_x[scores_x.argmax()]
    best_seq_y = seqs_y[scores_y.argmax()]
    # print(best_seq_x, best_seq_y)

    # Now if we have sequences greater than length 7, (up to 9),
    # that means we have up to 9 possible combinations of sets of 7 sequences
    # We try all of them and see which has the best checkerboard response
    sub_seqs_x = [best_seq_x[k:k+7] for k in range(len(best_seq_x) - 7 + 1)]
    sub_seqs_y = [best_seq_y[k:k+7] for k in range(len(best_seq_y) - 7 + 1)]

    dx = np.median(np.diff(best_seq_x))
    dy = np.median(np.diff(best_seq_y))
    corners = np.zeros(4, dtype=int)

    # Add 1 buffer to include the outer tiles, since sequences are only using
    # inner chessboard lines
    corners[0] = int(best_seq_y[0]-dy)
    corners[1] = int(best_seq_x[0]-dx)
    corners[2] = int(best_seq_y[-1]+dy)
    corners[3] = int(best_seq_x[-1]+dx)

    # Generate crop image with one full sequence, which may be wider than a normal
    # chessboard by an extra 2 tiles, we'll iterate over all combinations
    # (up to 9) and choose the one that correlates best with a chessboard
    gray_img_crop = PIL.Image.fromarray(img_arr_gray).crop(corners)

    # Build a kernel image of an ideal chessboard to correlate against
    k = 8  # Arbitrarily chose 8x8 pixel tiles for correlation image
    quad = np.ones([k, k])
    kernel = np.vstack([np.hstack([quad, -quad]), np.hstack([-quad, quad])])
    kernel = np.tile(kernel, (4, 4))  # Becomes an 8x8 alternating grid (chessboard)
    kernel = kernel / np.linalg.norm(kernel)  # normalize
    # 8*8 = 64x64 pixel ideal chessboard

    k = 0
    n = max(len(sub_seqs_x), len(sub_seqs_y))
    final_corners = None
    best_score = None

    # Iterate over all possible combinations of sub sequences and keep the corners
    # with the best correlation response to the ideal 64x64px chessboard
    for i in range(len(sub_seqs_x)):
        for j in range(len(sub_seqs_y)):
            k = k + 1

            # [y, x, y, x]
            sub_corners = np.array([
                sub_seqs_y[j][0]-corners[0]-dy, sub_seqs_x[i][0]-corners[1]-dx,
                sub_seqs_y[j][-1]-corners[0]+dy, sub_seqs_x[i][-1]-corners[1]+dx],
                dtype=int)  # was np.int, which is removed in newer numpy

            # Generate crop candidate, nearest pixel is fine for correlation check
            sub_img = gray_img_crop.crop(sub_corners).resize((64, 64))

            # Perform correlation score, keep running best corners as our final output
            # Use absolute since it's possible board is rotated 90 deg
            score = np.abs(np.sum(kernel * sub_img))
            if best_score is None or score > best_score:
                best_score = score
                final_corners = sub_corners + [corners[0], corners[1], corners[0], corners[1]]

    return final_corners

def getAllSequences(seq, min_seq_len=7, err_px=5):
    """Given sequence of increasing numbers, get all sequences with common
    spacing (within err_px) that contain at least min_seq_len values"""

    # Sanity check that there are enough values to satisfy
    if len(seq) < min_seq_len:
        return []

    # For every value, take the next value and see how many times we can step
    # that falls on another value within err_px points
    seqs = []
    for i in range(len(seq)-1):
        for j in range(i+1, len(seq)):
            # Check that seq[i], seq[j] not already in previous sequences
            duplicate = False
            for prev_seq in seqs:
                for k in range(len(prev_seq)-1):
                    if seq[i] == prev_seq[k] and seq[j] == prev_seq[k+1]:
                        duplicate = True
            if duplicate:
                continue
            d = seq[j] - seq[i]

            # Ignore two points that are within error bounds of each other
            if d < err_px:
                continue

            s = [seq[i], seq[j]]
            n = s[-1] + d
            while np.abs((seq-n)).min() < err_px:
                n = seq[np.abs((seq-n)).argmin()]
                s.append(n)
                n = s[-1] + d

            if len(s) >= min_seq_len:
                s = np.array(s)
                seqs.append(s)
    return seqs

def getChessTilesColor(img, corners):
    # img is a color RGB image
    # corners = (x0, y0, x1, y1) for top-left corner to bot-right corner of board
    height, width, depth = img.shape
    if depth != 3:
        print("Need RGB color image input")
        return None

    # corners could be outside image bounds, pad image as needed
    padl_x = max(0, -corners[0])
    padl_y = max(0, -corners[1])
    padr_x = max(0, corners[2] - width)
    padr_y = max(0, corners[3] - height)

    img_padded = np.pad(img, ((padl_y, padr_y), (padl_x, padr_x), (0, 0)), mode='edge')

    chessboard_img = img_padded[
        (padl_y + corners[1]):(padl_y + corners[3]),
        (padl_x + corners[0]):(padl_x + corners[2]), :]

    # 256x256 px RGB image, 32x32px individual RGB tiles, normalized 0-1 floats
    chessboard_img_resized = np.asarray(
        PIL.Image.fromarray(chessboard_img)
        .resize([256, 256], PIL.Image.BILINEAR), dtype=np.float32) / 255.0

    # Stack deep 64 tiles with 3 channels RGB each
    # so, first 3 slabs are RGB for tile A1, then next 3 slabs for tile A2 etc.
    tiles = np.zeros([32, 32, 3*64], dtype=np.float32)  # color
    # Assume A1 is bottom left of image, need to reverse rank since images start
    # with origin in top left
    for rank in range(8):  # rows (numbers)
        for file in range(8):  # columns (letters)
            # color
            tiles[:, :, 3*(rank*8+file):3*(rank*8+file+1)] = \
                chessboard_img_resized[(7-rank)*32:((7-rank)+1)*32, file*32:(file+1)*32]

    return tiles

def getChessBoardGray(img, corners):
    # img is a grayscale image
    # corners = (x0, y0, x1, y1) for top-left corner to bot-right corner of board
    height, width = img.shape

    # corners could be outside image bounds, pad image as needed
    padl_x = max(0, -corners[0])
    padl_y = max(0, -corners[1])
    padr_x = max(0, corners[2] - width)
    padr_y = max(0, corners[3] - height)

    img_padded = np.pad(img, ((padl_y, padr_y), (padl_x, padr_x)), mode='edge')

    chessboard_img = img_padded[
        (padl_y + corners[1]):(padl_y + corners[3]),
        (padl_x + corners[0]):(padl_x + corners[2])]

    # 256x256 px image, 32x32px individual tiles
    # Normalized
    chessboard_img_resized = np.asarray(
        PIL.Image.fromarray(chessboard_img)
        .resize([256, 256], PIL.Image.BILINEAR), dtype=np.uint8) / 255.0
    return chessboard_img_resized

def getChessTilesGray(img, corners):
    chessboard_img_resized = getChessBoardGray(img, corners)
    return getTiles(chessboard_img_resized)


def getTiles(processed_gray_img):
    # Given 256x256 px normalized grayscale image of a chessboard (32x32px per tile)
    # NOTE (values must be in range 0-1)
    # Return a 32x32x64 tile array
    #
    # Stack deep 64 tiles
    # so, first slab is tile A1, then A2 etc.
    tiles = np.zeros([32, 32, 64], dtype=np.float32)  # grayscale
    # Assume A1 is bottom left of image, need to reverse rank since images start
    # with origin in top left
    for rank in range(8):  # rows (numbers)
        for file in range(8):  # columns (letters)
            tiles[:, :, (rank*8+file)] = \
                processed_gray_img[(7-rank)*32:((7-rank)+1)*32, file*32:(file+1)*32]

    return tiles

def findGrayscaleTilesInImage(img):
    """Find chessboard and convert into input tiles for CNN"""
    if img is None:
        return None, None

    # Convert to grayscale numpy array
    img_arr = np.asarray(img.convert("L"), dtype=np.float32)

    # Use computer vision to find orthorectified chessboard corners in image
    corners = findChessboardCorners(img_arr)
    if corners is None:
        return None, None

    # Pull grayscale tiles out given image and chessboard corners
    tiles = getChessTilesGray(img_arr, corners)

    # Return both the tiles as well as chessboard corner locations in the image
    return tiles, corners

# DEBUG
# from matplotlib import pyplot as plt
# def plotTiles(tiles):
#     """Plot color or grayscale tiles as 8x8 subplots"""
#     plt.figure(figsize=(6,6))
#     files = "ABCDEFGH"
#     for rank in range(8): # rows (numbers)
#         for file in range(8): # columns (letters)
#             plt.subplot(8,8,(7-rank)*8 + file + 1) # Plot rank in reverse order to match image

#             if tiles.shape[2] == 64:
#                 # Grayscale
#                 tile = tiles[:,:,(rank*8+file)] # grayscale
#                 plt.imshow(tile, interpolation='None', cmap='gray', vmin=0, vmax=1)
#             else:
#                 # Color
#                 tile = tiles[:,:,3*(rank*8+file):3*(rank*8+file+1)] # color
#                 plt.imshow(tile, interpolation='None')

#             plt.axis('off')
#             plt.title('%s %d' % (files[file], rank+1), fontsize=6)
#     plt.show()

def main(url):
    print("Loading url %s..." % url)
    color_img, url = loadImageFromURL(url)

    # Fail if can't load image
    if color_img is None:
        print('Couldn\'t load url: %s' % url)
        return

    if color_img.mode != 'RGB':
        color_img = color_img.convert('RGB')
    print("Processing...")
    a = time()
    img_arr = np.asarray(color_img.convert("L"), dtype=np.float32)
    corners = findChessboardCorners(img_arr)
    print("Took %.4fs" % (time()-a))
    # corners = [x0, y0, x1, y1] where (x0,y0)
    # is top left and (x1,y1) is bot right

    if corners is not None:
        print("\tFound corners for %s: %s" % (url, corners))
        link = getVisualizeLink(corners, url)
        print(link)

        # tiles = getChessTilesColor(np.array(color_img), corners)
        # tiles = getChessTilesGray(img_arr, corners)
        # plotTiles(tiles)

        # plt.imshow(color_img, interpolation='none')
        # plt.plot(corners[[0,0,2,2,0]]-0.5, corners[[1,3,3,1,1]]-0.5, color='red', linewidth=1)
        # plt.show()
    else:
        print('\tNo corners found in image')

if __name__ == '__main__':
    np.set_printoptions(suppress=True, precision=2)
    parser = argparse.ArgumentParser(description='Find orthorectified chessboard corners in image')
    parser.add_argument('urls', default=['https://i.redd.it/1uw3h772r0fy.png'],
                        metavar='urls', type=str, nargs='*', help='Input image urls')
    # main('http://www.chessanytime.com/img/jeudirect/simplechess.png')
    # main('https://i.imgur.com/JpzfV3y.jpg')
    # main('https://i.imgur.com/jsCKzU9.jpg')
    # main('https://i.imgur.com/49htmMA.png')
    # main('https://i.imgur.com/HHdHGBX.png')
    # main('http://imgur.com/By2xJkO')
    # main('http://imgur.com/p8DJMly')
    # main('https://i.imgur.com/Ns0iBrw.jpg')
    # main('https://i.imgur.com/KLcCiuk.jpg')
    args = parser.parse_args()
    for url in args.urls:
        main(url)
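The heart of the corner finder is getAllSequences, which chains evenly spaced gradient peaks into candidate grid-line runs. A small sketch (synthetic input, not part of the commit) with seven lines spaced 30px apart plus two off-grid noise readings:

# Sketch: getAllSequences pulls out evenly spaced runs (the candidate grid lines).
import numpy as np
from chessfenbot.chessboard_finder import getAllSequences

peaks = np.array([5, 40, 70, 100, 130, 160, 190, 220, 243])
seqs = getAllSequences(peaks, min_seq_len=7, err_px=5)
for s in seqs:
    print(s)  # [ 40  70 100 130 160 190 220] - the noise at 5 and 243 is dropped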
chessfenbot/chessbot.py
ADDED
@@ -0,0 +1,165 @@
#!/usr/bin/env python
# ChessFenBot daemon
# Finds submissions with chessboard images in them,
# use a tensorflow convolutional neural network to predict pieces and return
# a lichess analysis link and FEN diagram of chessboard
# Run with --dry to dry run without actual submissions
from __future__ import print_function
import praw
import requests
import socket
import time
from datetime import datetime
import argparse

import tensorflow_chessbot  # For neural network model
from helper_functions_chessbot import *
from helper_functions import shortenFEN
from cfb_helpers import *  # logging, comment waiting and self-reply helpers

def generateResponseMessage(submission, predictor):
    print("\n---\nImage URL: %s" % submission.url)

    # Use CNN to make a prediction
    fen, certainty, visualize_link = predictor.makePrediction(submission.url)

    if fen is None:
        print("> %s - Couldn't generate FEN, skipping..." % datetime.now())
        print("\n---\n")
        return None

    fen = shortenFEN(fen)  # ex. '111pq11r' -> '3pq2r'
    print("Predicted FEN: %s" % fen)
    print("Certainty: %.4f%%" % (certainty*100))

    # Get side from title or fen
    side = getSideToPlay(submission.title, fen)
    # Generate response message
    msg = generateMessage(fen, certainty, side, visualize_link)
    print("fen: %s\nside: %s\n" % (fen, side))
    return msg


def processSubmission(submission, cfb, predictor, args, reply_wait_time=10):
    # Check if submission passes requirements and wasn't already replied to
    if isPotentialChessboardTopic(submission):
        if not previouslyRepliedTo(submission, cfb):
            # Generate response
            response = generateResponseMessage(submission, predictor)
            if response is None:
                logMessage(submission, "[NO-FEN]")  # Skip since couldn't generate FEN
                return

            # Reply to submission with response
            if not args.dry:
                logMessage(submission, "[REPLIED]")
                submission.reply(response)
            else:
                logMessage(submission, "[DRY-RUN-REPLIED]")

            # Wait after submitting to not overload
            waitWithComments(reply_wait_time)
        else:
            logMessage(submission, "[SKIP]")  # Skip since replied to already

    else:
        logMessage(submission)
        time.sleep(1)  # Wait a second between normal submissions

def main(args):
    resetTensorflowGraph()
    running = True
    reddit = praw.Reddit('CFB')  # client credentials set up in local praw.ini file
    cfb = reddit.user.me()  # ChessFenBot object
    subreddit = reddit.subreddit('chess+chessbeginners+AnarchyChess+betterchess+chesspuzzles')
    predictor = tensorflow_chessbot.ChessboardPredictor()

    while running:
        # Start live stream on all submissions in the subreddit
        stream = subreddit.stream.submissions()
        try:
            for submission in stream:
                processSubmission(submission, cfb, predictor, args)
        except (socket.error, requests.exceptions.ReadTimeout,
                requests.packages.urllib3.exceptions.ReadTimeoutError,
                requests.exceptions.ConnectionError) as e:
            print(
                "> %s - Connection error, skipping and continuing in 30 seconds: %s" % (
                datetime.now(), e))
            time.sleep(30)
            continue
        except Exception as e:
            print("Unknown Error, skipping and continuing in 30 seconds:", e)
            time.sleep(30)
            continue
        except KeyboardInterrupt:
            print("Keyboard Interrupt: Exiting...")
            running = False
            break

    predictor.close()
    print('Finished')

def resetTensorflowGraph():
    """WIP needed to restart predictor after an error"""
    import tensorflow as tf
    print('Reset TF graph')
    tf.reset_default_graph()  # clear out graph

def runSpecificSubmission(args):
    resetTensorflowGraph()
    reddit = praw.Reddit('CFB')  # client credentials set up in local praw.ini file
    cfb = reddit.user.me()  # ChessFenBot object
    predictor = tensorflow_chessbot.ChessboardPredictor()

    submission = reddit.submission(args.sub)
    print("URL: ", submission.url)
    if submission:
        print('Processing...')
        processSubmission(submission, cfb, predictor, args)

    predictor.close()
    print('Done')

def dryRunTest(submission='5tuerh'):
    resetTensorflowGraph()
    reddit = praw.Reddit('CFB')  # client credentials set up in local praw.ini file
    predictor = tensorflow_chessbot.ChessboardPredictor()

    # Use a specific submission
    submission = reddit.submission(submission)
    print('Loading %s' % submission.id)
    # Check if submission passes requirements and wasn't already replied to
    if isPotentialChessboardTopic(submission):
        # Generate response
        response = generateResponseMessage(submission, predictor)
        print("RESPONSE:\n")
        print('-----------------------------')
        print(response)
        print('-----------------------------')
    else:
        print('Submission not considered chessboard topic')

    predictor.close()
    print('Finished')



if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dry', help='dry run (don\'t actually submit replies)',
                        action="store_true", default=False)
    parser.add_argument('--test', help='dry run test on a pre-existing submission',
                        action="store_true", default=False)
    parser.add_argument('--sub', help='Pass submission string to process')
    args = parser.parse_args()
    if args.test:
        print('Doing dry run test on submission')
        if args.sub:
            dryRunTest(args.sub)
        else:
            dryRunTest()
    elif args.sub is not None:
        runSpecificSubmission(args)
    else:
        main(args)
chessfenbot/dataset.py
ADDED
@@ -0,0 +1,61 @@
import numpy as np  # needed for the reshaping/shuffling below (missing in the original)
import tensorflow as tf

# From https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/examples/tutorials/mnist/input_data.py
class DataSet(object):
    def __init__(self, images, labels, dtype=tf.float32):
        """Construct a DataSet.
        `dtype` can be either
        `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
        `[0, 1]`.
        """
        dtype = tf.as_dtype(dtype).base_dtype

        if dtype not in (tf.uint8, tf.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                            dtype)
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape,
                                                   labels.shape))
        self._num_examples = images.shape[0]
        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns] (assuming depth == 1)
        assert images.shape[3] == 1
        images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
        if dtype == tf.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(np.float32)
            images = np.multiply(images, 1.0 / 255.0)

        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size):
        """Return the next `batch_size` examples from this data set."""
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]
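A minimal sketch of feeding DataSet with synthetic tiles (not part of the commit; the shapes mirror the 32x32 grayscale tiles and 13-way piece labels used throughout this repo):

# Sketch: exercise DataSet.next_batch with random 32x32 grayscale tiles.
import numpy as np
import tensorflow as tf
from chessfenbot.dataset import DataSet

images = np.random.randint(0, 256, size=(100, 32, 32, 1)).astype(np.uint8)
labels = np.zeros((100, 13), dtype=np.float64)
labels[np.arange(100), np.random.randint(0, 13, size=100)] = 1.0  # one-hot piece labels

ds = DataSet(images, labels, dtype=tf.float32)
batch_images, batch_labels = ds.next_batch(32)
print(batch_images.shape, batch_labels.shape)  # (32, 1024) flattened tiles, (32, 13)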
chessfenbot/example_input.png
ADDED
chessfenbot/helper_functions.py
ADDED
@@ -0,0 +1,172 @@
import numpy as np

# Imports for visualization
import PIL.Image

# DEBUG for ipython notebook visualizations.
# from IPython.display import clear_output, Image, display

# def display_array(a, fmt='jpeg', rng=[0,1]):
#     """Display an array as a picture."""
#     a = (a - rng[0])/float(rng[1] - rng[0]) # normalized float value
#     a = np.uint8(np.clip(a*255, 0, 255))
#     f = StringIO()

#     PIL.Image.fromarray(np.asarray(a, dtype=np.uint8)).save(f, fmt)
#     display(Image(data=f.getvalue()))

# def display_weight(a, fmt='jpeg', rng=[0,1]):
#     """Display an array as a color picture."""
#     a = (a - rng[0])/float(rng[1] - rng[0]) # normalized float value
#     a = np.uint8(np.clip(a*255, 0, 255))
#     f = StringIO()

#     v = np.asarray(a, dtype=np.uint8)

#     # blue is high intensity, red is low
#     # Negative
#     r = 255-v.copy()
#     r[r<127] = 0
#     r[r>=127] = 255

#     # None
#     g = np.zeros_like(v)

#     # Positive
#     b = v.copy()
#     b[b<127] = 0
#     b[b>=127] = 255

#     #np.clip((v-127)/2,0,127)*2

#     #-1 to 1
#     intensity = np.abs(2.*a-1)

#     rgb = np.uint8(np.dstack([r,g,b]*intensity))

#     PIL.Image.fromarray(rgb).save(f, fmt)
#     display(Image(data=f.getvalue(), width=100))

# def display_image(a, fmt='png'):
#     """Display an image as a picture in-line."""
#     f = StringIO()

#     PIL.Image.fromarray(np.asarray(a, dtype=np.uint8)).save(f, fmt)
#     display(Image(data=f.getvalue()))

# FEN related
def getFENtileLabel(fen, letter, number):
    """Given a fen string and a rank (number) and file (letter), return label vector"""
    l2i = lambda l: ord(l)-ord('A')  # letter to index
    number = 8-number  # FEN has order backwards
    piece_letter = fen[number*8+number + l2i(letter)]
    label = np.zeros(13, dtype=np.uint8)
    label['1KQRBNPkqrbnp'.find(piece_letter)] = 1  # note the 1 instead of ' ' due to FEN notation
    # We ignore shorter FENs with numbers > 1 because we generate the FENs ourselves
    return label

# We'll define the 12 pieces and 1 space with single characters
# KQRBNPkqrbnp
def getLabelForSquare(letter, number):
    """Given letter and number (say 'B3'), return one-hot label vector
    (12 pieces + 1 space == no piece, so 13-long vector)"""
    l2i = lambda l: ord(l)-ord('A')  # letter to index
    piece2Label = lambda piece: ' KQRBNPkqrbnp'.find(piece)
    # build mapping to index
    # Starter position
    starter_mapping = np.zeros([8, 8], dtype=np.uint8)
    starter_mapping[0, [l2i('A'), l2i('H')]] = piece2Label('R')
    starter_mapping[0, [l2i('B'), l2i('G')]] = piece2Label('N')
    starter_mapping[0, [l2i('C'), l2i('F')]] = piece2Label('B')
    starter_mapping[0, l2i('D')] = piece2Label('Q')
    starter_mapping[0, l2i('E')] = piece2Label('K')
    starter_mapping[1, :] = piece2Label('P')

    starter_mapping[7, [l2i('A'), l2i('H')]] = piece2Label('r')
    starter_mapping[7, [l2i('B'), l2i('G')]] = piece2Label('n')
    starter_mapping[7, [l2i('C'), l2i('F')]] = piece2Label('b')
    starter_mapping[7, l2i('D')] = piece2Label('q')
    starter_mapping[7, l2i('E')] = piece2Label('k')
    starter_mapping[6, :] = piece2Label('p')
    # Note: if we display the array, the first row is white,
    # normally bottom, but arrays show it as top

    # Generate one-hot label
    label = np.zeros(13, dtype=np.uint8)
    label[starter_mapping[number-1, l2i(letter)]] = 1
    return label

def name2Label(name):
    """Convert name of piece into label index"""
    return ' KQRBNPkqrbnp'.find(name)

def labelIndex2Name(label_index):
    """Convert label index into name of piece"""
    return ' KQRBNPkqrbnp'[label_index]

def label2Name(label):
    """Convert label vector into name of piece"""
    return labelIndex2Name(label.argmax())

def shortenFEN(fen):
    """Reduce FEN to shortest form (ex. '111p11Q' becomes '3p2Q')"""
    return fen.replace('11111111', '8').replace('1111111', '7') \
              .replace('111111', '6').replace('11111', '5') \
              .replace('1111', '4').replace('111', '3').replace('11', '2')

def lengthenFEN(fen):
    """Lengthen FEN to 71-character form (ex. '3p2Q' becomes '111p11Q')"""
    return fen.replace('8', '11111111').replace('7', '1111111') \
              .replace('6', '111111').replace('5', '11111') \
              .replace('4', '1111').replace('3', '111').replace('2', '11')

def unflipFEN(fen):
    if len(fen) < 71:
        fen = lengthenFEN(fen)  # was lengthenFEN(FEN), a NameError
    return '/'.join([r[::-1] for r in fen.split('/')][::-1])


# For Training in IPython Notebooks
def loadFENtiles(image_filepaths):
    """Load Tiles with FEN string in filename for labels.
    return both images and labels"""
    # Each tile is a 32x32 grayscale image, add extra axis for working with MNIST Data format
    images = np.zeros([image_filepaths.size, 32, 32, 1], dtype=np.uint8)
    labels = np.zeros([image_filepaths.size, 13], dtype=np.float64)

    for i, image_filepath in enumerate(image_filepaths):
        if i % 1000 == 0:
            # print("On #%d/%d : %s" % (i, image_filepaths.size, image_filepath))
            print(".",)

        # Image
        images[i, :, :, 0] = np.asarray(PIL.Image.open(image_filepath), dtype=np.uint8)

        # Label
        fen = image_filepath[-78:-7]
        _rank = image_filepath[-6]
        _file = int(image_filepath[-5])
        labels[i, :] = getFENtileLabel(fen, _rank, _file)
    print("Done")
    return images, labels

def loadLabels(image_filepaths):
    """Load label vectors from list of image filepaths"""
    # Each filepath contains which square we're looking at,
    # since we're in starter position, we know which
    # square has which piece, 12 distinct pieces
    # (6 white and 6 black) and 1 as empty = 13 labels
    training_data = np.zeros([image_filepaths.size, 13], dtype=np.float64)
    for i, image_filepath in enumerate(image_filepaths):
        training_data[i, :] = getLabelForSquare(image_filepath[-6], int(image_filepath[-5]))
    return training_data

def loadImages(image_filepaths):
    # Each tile is a 32x32 grayscale image, add extra axis for working with MNIST Data format
    training_data = np.zeros([image_filepaths.size, 32, 32, 1], dtype=np.uint8)
    for i, image_filepath in enumerate(image_filepaths):
        if i % 100 == 0:
            print("On #%d/%d : %s" % (i, image_filepaths.size, image_filepath))
        img = PIL.Image.open(image_filepath)
        training_data[i, :, :, 0] = np.asarray(img, dtype=np.uint8)
    return training_data
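A worked example of the FEN helpers (a sketch; shortenFEN and lengthenFEN are inverses on piece-placement strings):

# Sketch: round-trip between short and 71-character FEN forms.
from chessfenbot.helper_functions import shortenFEN, lengthenFEN

long_fen = 'rnbqkbnr/pppppppp/11111111/11111111/11111111/11111111/PPPPPPPP/RNBQKBNR'
short_fen = shortenFEN(long_fen)
print(short_fen)                           # rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR
print(lengthenFEN(short_fen) == long_fen)  # True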
chessfenbot/helper_functions_chessbot.py
ADDED
@@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
#
# Helper functions for the reddit chessbot
# Includes functions to parse FEN strings and get pithy quotes
import re
from helper_functions import lengthenFEN
from message_template import *

#########################################################
# ChessBot Message Generation Functions

def isPotentialChessboardTopic(sub):
    """if url is imgur link, or url ends in .png/.jpg/.gif"""
    if sub.url is None:
        return False
    return ('imgur' in sub.url
            or any([sub.url.lower().endswith(ending) for ending in ['.png', '.jpg', 'jpeg', '.gif']]))

def invert(fen):
    return ''.join(reversed(fen))

def generateMessage(fen, certainty, side, visualize_link):
    """Generate response message using FEN, certainty and side for flipping link order"""
    vals = {}  # Holds template responses

    # Things that don't rely on black/white to play
    # FEN image link is aligned with screenshot, not side to play
    if fen == '8/8/8/8/8/8/8/8':
        # Empty chessboard link, fen-to-image doesn't correctly identify those
        vals['unaligned_fen_img_link'] = 'http://i.stack.imgur.com/YxP53.gif'
    else:
        vals['unaligned_fen_img_link'] = 'http://www.fen-to-image.com/image/60/%s.png' % fen
    vals['certainty'] = certainty*100.0  # to percentage
    vals['pithy_message'] = getPithyMessage(certainty)

    if side == 'b':
        # Flip FEN if black to play, assumes image is flipped
        fen = invert(fen)

    inverted_fen = invert(fen)

    # Get castling status based on pieces being in initial positions or not
    castle_status = getCastlingStatus(fen)
    inverted_castle_status = getCastlingStatus(inverted_fen)

    # Fill out template and return
    vals['fen_w'] = "%s w %s -" % (fen, castle_status)
    vals['fen_b'] = "%s b %s -" % (fen, castle_status)
    vals['inverted_fen_w'] = "%s w %s -" % (inverted_fen, inverted_castle_status)
    vals['inverted_fen_b'] = "%s b %s -" % (inverted_fen, inverted_castle_status)

    vals['lichess_analysis_w'] = 'https://www.lichess.org/analysis/%s_w_%s' % (fen, castle_status)
    vals['lichess_analysis_b'] = 'https://www.lichess.org/analysis/%s_b_%s' % (fen, castle_status)
    vals['lichess_editor_w'] = 'https://www.lichess.org/editor/%s_w_%s' % (fen, castle_status)
    vals['lichess_editor_b'] = 'https://www.lichess.org/editor/%s_b_%s' % (fen, castle_status)

    vals['inverted_lichess_analysis_w'] = 'https://www.lichess.org/analysis/%s_w_%s' % (inverted_fen, inverted_castle_status)
    vals['inverted_lichess_analysis_b'] = 'https://www.lichess.org/analysis/%s_b_%s' % (inverted_fen, inverted_castle_status)
    vals['inverted_lichess_editor_w'] = 'https://www.lichess.org/editor/%s_w_%s' % (inverted_fen, inverted_castle_status)
    vals['inverted_lichess_editor_b'] = 'https://www.lichess.org/editor/%s_b_%s' % (inverted_fen, inverted_castle_status)

    vals['visualize_link'] = visualize_link

    return MESSAGE_TEMPLATE.format(**vals)



# Add a little message based on certainty of response
def getPithyMessage(certainty):
    pithy_messages = [
        '*[\[ ◕ _ ◕\]^*> ... \[⌐■ _ ■\]^*](http://i.imgur.com/yaVftzT.jpg)*',
        'A+ ✓',
        '✓',
        '[Close.](http://i.imgur.com/SwKKZlD.jpg)',
        '[WAI](http://gfycat.com/RightHalfIndianglassfish)',
        '[:(](http://i.imgur.com/BNwca4R.gifv)',
        '[I tried.](http://i.imgur.com/kmmp0lc.png)',
        '[Wow.](http://i.imgur.com/67fZDh9.webm)']
    pithy_messages_cutoffs = [0.999995, 0.99, 0.9, 0.8, 0.7, 0.5, 0.2, 0.0]

    for cutoff, pithy_message in zip(pithy_messages_cutoffs, pithy_messages):
        if certainty >= cutoff:
            return pithy_message

    return ""

def getSideToPlay(title, fen):
    """Based on post title return 'w', 'b', or predict from FEN"""
    title = title.lower()
    # Return 'b' if 'black' is in the title (unless 'white to' is), and vice versa;
    # predict from the FEN if neither side is named
    if 'black' in title:
        if 'white to' in title:
            return 'w'
        return 'b'
    elif 'white' in title:
        if 'black to' in title:
            return 'b'
        return 'w'
    else:
        # Predict side from fen (always returns 'w' or 'b', default 'w')
        return predictSideFromFEN(fen)

def predictSideFromFEN(fen):
    """Returns which side it thinks FEN is looking from.
    Checks number of white and black pieces on either side to determine,
    i.e. if more black pieces are on the 1st-4th ranks, then black to play"""

    # remove space values (numbers) from fen
    fen = re.sub(r'\d', '', fen)

    # split fen to top half and bottom half (top half first)
    parts = fen.split('/')
    top = list(''.join(parts[:4]))
    bottom = list(''.join(parts[4:]))

    # If screenshot is aligned from POV of white to play, we'd expect
    # top to be mostly black pieces (lowercase)
    # and bottom to be mostly white pieces (uppercase), so let's count
    top_count_white = sum(list(map(lambda x: ord(x) <= ord('Z'), top)))
    bottom_count_white = sum(list(map(lambda x: ord(x) <= ord('Z'), bottom)))

    top_count_black = sum(list(map(lambda x: ord(x) >= ord('a'), top)))
    bottom_count_black = sum(list(map(lambda x: ord(x) >= ord('a'), bottom)))

    # If more white pieces on top side, or more black pieces on bottom side, black to play
    if (top_count_white > bottom_count_white or top_count_black < bottom_count_black):
        return 'b'

    # Otherwise white
    return 'w'

def getCastlingStatus(fen):
    """Check FEN to see if castling is allowed based on initial positions.
    Returns 'KQkq' variants or '-' if no castling."""

    fen = lengthenFEN(fen)  # 71-char long fen
    # rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR : initial position
    # 01234567                            01234567 +63

    status = ['', '', '', '']  # KQkq
    # Check if black king can castle
    if fen[4] == 'k':
        # long (q)
        if fen[0] == 'r':
            status[3] = 'q'
        if fen[7] == 'r':
            status[2] = 'k'
    # Check if white king can castle
    if fen[63+4] == 'K':
        # long (Q)
        if fen[63+0] == 'R':
            status[1] = 'Q'
        if fen[63+7] == 'R':
            status[0] = 'K'

    status = ''.join(status)
    return status if status else '-'

def getFENtileLetter(fen, letter, number):
    """Given a fen string and a rank (number) and file (letter), return piece letter"""
    l2i = lambda l: ord(l)-ord('A')  # letter to index
    piece_letter = fen[(8-number)*8+(8-number) + l2i(letter)]
    return ' KQRBNPkqrbnp'.find(piece_letter)
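For example (a sketch; run from inside the chessfenbot/ directory, since this module uses top-level imports): on the standard starting position the castling check reads the king and rook squares of the lengthened FEN and reports full castling rights, and the side heuristic sees white pieces on the bottom half.

# Sketch: castling rights and side-to-play heuristics on the starting position.
from helper_functions_chessbot import getCastlingStatus, predictSideFromFEN

start = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR'
print(getCastlingStatus(start))   # 'KQkq' - kings and rooks on initial squares
print(predictSideFromFEN(start))  # 'w' - white pieces on the bottom half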
chessfenbot/helper_image_loading.py
ADDED
@@ -0,0 +1,109 @@
import numpy as np

# Imports for visualization
import PIL.Image
from io import BytesIO
try:
  # Python 3
  from urllib.request import urlopen, Request
  from urllib.parse import quote
except ImportError:
  # Python 2
  from urllib2 import urlopen, Request
  from urllib2 import quote


# Imports for pulling metadata from imgur url
import requests
from bs4 import BeautifulSoup

# All images are returned as PIL images, not numpy arrays
def loadImageGrayscale(img_file):
  """Load image from file, convert to grayscale PIL image"""
  img = PIL.Image.open(img_file)

  # Convert to grayscale and return
  return img.convert("L")

def loadImageFromURL(url, max_size_bytes=4000000):
  """Load image from url.

  If the url has more data than max_size_bytes, fail out
  Try and update with metadata url link if an imgur link"""

  # If imgur try to load from metadata
  url = tryUpdateImgurURL(url)

  # Try loading image from url directly
  try:
    req = Request(url, headers={'User-Agent': "TensorFlow Chessbot"})
    con = urlopen(req)
    # Load up to max_size_bytes of data from url
    data = con.read(max_size_bytes)
    # If there is more, image is too big, skip
    if len(con.read(1)) != 0:
      print("Skipping, url data larger than %d bytes" % max_size_bytes)
      return None, url

    # Process into PIL image
    img = PIL.Image.open(BytesIO(data))
    # Return PIL image and url used
    return img, url
  except IOError:
    # Return None on failure to load image from url
    return None, url

def tryUpdateImgurURL(url):
  """Try to get actual image url from imgur metadata"""
  if 'imgur' not in url:  # Only attempt on urls that have imgur in it
    return url

  soup = BeautifulSoup(requests.get(url).content, "lxml")

  # Get metadata tags
  meta = soup.find_all('meta')
  # Get the specific tag, ex.
  # <meta content="https://i.imgur.com/bStt0Fuh.jpg" name="twitter:image"/>
  tags = list(filter(lambda tag: 'name' in tag.attrs and tag.attrs['name'] == "twitter:image", meta))

  if tags:
    # Replace url with metadata url
    url = tags[0]['content']

  return url

def loadImageFromPath(img_path):
  """Load PIL image from image filepath, keep as color"""
  return PIL.Image.open(open(img_path, 'rb'))


def resizeAsNeeded(img, max_size=(2000, 2000), max_fail_size=(2000, 2000)):
  if not PIL.Image.isImageType(img):
    img = PIL.Image.fromarray(img)  # Convert to PIL Image if not already

  # If image is larger than fail size, don't try resizing and give up
  if img.size[0] > max_fail_size[0] or img.size[1] > max_fail_size[1]:
    return None

  # Resize if image larger than max size
  if img.size[0] > max_size[0] or img.size[1] > max_size[1]:
    print("Image too big (%d x %d)" % (img.size[0], img.size[1]))
    new_size = np.min(max_size)  # px
    if img.size[0] > img.size[1]:
      # resize by width to new limit
      ratio = float(new_size) / img.size[0]
    else:
      # resize by height
      ratio = float(new_size) / img.size[1]
    print("Reducing by factor of %.2g" % (1. / ratio))
    new_size = (np.array(img.size) * ratio).astype(int)
    print("New size: (%d x %d)" % (new_size[0], new_size[1]))
    img = img.resize(new_size, PIL.Image.BILINEAR)
  return img

def getVisualizeLink(corners, url):
  """Return online link to visualize found corners for url"""
  encoded_url = quote(url, safe='')

  return ("http://tetration.xyz/tensorflow_chessbot/overlay_chessboard.html?%d,%d,%d,%d,%s" %
          (corners[0], corners[1], corners[2], corners[3], encoded_url))
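A minimal sketch of how these loaders chain together for a URL input, assuming the `chessfenbot` package is importable (the example URL is the one used elsewhere in this repo):

```py
from chessfenbot import helper_image_loading

# Fetch the image (resolving imgur pages to their direct image first),
# then bound its size before any further processing.
img, url = helper_image_loading.loadImageFromURL('http://imgur.com/u4zF5Hj.png')
if img is not None:
  img = helper_image_loading.resizeAsNeeded(img)  # None if over max_fail_size
```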
chessfenbot/helper_webkit2png.py
ADDED
@@ -0,0 +1,76 @@
# Wrapper on webkit2png to render chessboard layouts from lichess

import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from PyQt4.QtNetwork import *

# Download webkit2png from here and place in same directory
# https://github.com/adamn/python-webkit2png/tree/master/webkit2png
#
import webkit2png

######################################################
# For scraping website screenshots
class Options():
  url = None
  output_filename = None
  cookie = None


class ChessScreenshotServer():
  """docstring for ChessScreenshotServer"""
  def __init__(self, url=None, output_filename=None):
    self.options = Options()
    self.options.url = url
    self.options.output_filename = output_filename

    self.app = self.init_qtgui()

  def init_qtgui(self, display=None, style=None, qtargs=None):
    """Initiates the QApplication environment using the given args."""
    if QApplication.instance():
      print("QApplication has already been instantiated.\n"
            "Ignoring given arguments and returning existing QApplication.")
      return QApplication.instance()

    qtargs2 = [sys.argv[0]]
    qtargs2.extend(qtargs or [])

    return QApplication(qtargs2)

  def renderScreenshotToFile(self):
    """This is run within QT"""
    try:
      renderer = webkit2png.WebkitRenderer()
      # renderer.wait = 5
      renderer.qWebSettings[QWebSettings.JavascriptEnabled] = True  # Enable javascript
      if self.options.cookie:
        renderer.cookies = [self.options.cookie]
      with open(self.options.output_filename, 'w') as f:
        renderer.render_to_file(res=self.options.url, file_object=f)
        print("\tSaved screenshot to '%s'" % f.name)
      QApplication.exit(0)
    except RuntimeError as e:
      print("Error:", e)
      print(e, file=sys.stderr)
      QApplication.exit(1)


  def takeScreenshot(self, url=None, output_filename=None):
    if url:
      self.options.url = url
    if output_filename:
      self.options.output_filename = output_filename

    QTimer.singleShot(0, self.renderScreenshotToFile)
    return self.app.exec_()

  def takeChessScreenshot(self, fen_string=None, output_filename=None,
                          cookie=None):
    """Take uncropped screenshot of lichess board of FEN string and save to file"""
    url_template = "http://en.lichess.org/editor/%s"
    if cookie:
      self.options.cookie = cookie
    return self.takeScreenshot(url_template % fen_string, output_filename)
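A sketch of the intended call pattern, assuming PyQt4 and the vendored `webkit2png` module are available (the FEN and output filename here are illustrative):

```py
from chessfenbot.helper_webkit2png import ChessScreenshotServer

server = ChessScreenshotServer()
# Renders the lichess board editor page for this FEN and saves the raw screenshot.
server.takeChessScreenshot(fen_string='rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR',
                           output_filename='board.png')
```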
chessfenbot/message_template.py
ADDED
@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
# Response message template
MESSAGE_TEMPLATE = """[◕ _ ◕]^*

I attempted to generate a [chessboard layout]({unaligned_fen_img_link}) from the posted image[^(what I saw)]({visualize_link}),
with a certainty of **{certainty:.3f}%**. *{pithy_message}*

-

◇ White to play : [Analysis]({lichess_analysis_w}) | [Editor]({lichess_editor_w})
`{fen_w}`

-

◆ Black to play : [Analysis]({lichess_analysis_b}) | [Editor]({lichess_editor_b})
`{fen_b}`

-

> ▾ Links for when pieces are inverted on the board:
>
> White to play : [Analysis]({inverted_lichess_analysis_w}) | [Editor]({inverted_lichess_editor_w})
> `{inverted_fen_w}`
>
> Black to play : [Analysis]({inverted_lichess_analysis_b}) | [Editor]({inverted_lichess_editor_b})
> `{inverted_fen_b}`

-


---

^(Yes I am a machine learning bot | )
[^(`How I work`)](http://github.com/Elucidation/tensorflow_chessbot 'Must go deeper')
^( | )[^(`Try your own images`)](http://tetration.xyz/ChessboardFenTensorflowJs/)
^( | Reply with a corrected FEN to add to my next training dataset)

"""
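The template is meant to be filled with `str.format`; a runnable sketch of the substitution mechanics, using dummy values rather than real FENs and lichess links:

```py
import string
from chessfenbot.message_template import MESSAGE_TEMPLATE

# Collect every placeholder name in the template and fill each with a visible
# dummy value; real callers supply FENs, analysis links, and certainty.
fields = {name for _, name, _, _ in string.Formatter().parse(MESSAGE_TEMPLATE) if name}
values = {name: '<%s>' % name for name in fields}
values['certainty'] = 99.9  # the one field with a float format spec ({certainty:.3f})
print(MESSAGE_TEMPLATE.format(**values))
```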
chessfenbot/readme.md
ADDED
@@ -0,0 +1,127 @@
## Cloned from: https://github.com/Elucidation/tensorflow_chessbot/tree/chessfenbot

TensorFlow Chessbot - /u/ChessFenBot [◕ _ ◕]<sup>\* *I make FENs*</sup>
---
## Command Line Interface (CLI)

### Setting up the virtual environment

This uses Python 3, pip3 and virtualenv; if you don't have these installed you can use:

```
sudo apt-get install python3-pip
sudo pip3 install virtualenv
```

Then, create a new virtual environment, source it, and install the dependencies from `requirements.txt`.

```
virtualenv venv
source venv/bin/activate
pip3 install -r requirements.txt
```

### Running the CLI

`tensorflow_chessbot.py` contains the library and script for running predictions on images passed by file or url.

```
$ ./tensorflow_chessbot.py -h
usage: tensorflow_chessbot.py [-h] [--url URL] [--filepath FILEPATH]

Predict a chessboard FEN from supplied local image link or URL

optional arguments:
  -h, --help           show this help message and exit
  --url URL            URL of image (ex. http://imgur.com/u4zF5Hj.png)
  --filepath FILEPATH  filepath to image (ex. u4zF5Hj.png)
```

For example to run on the provided `example_input.png` ![example_input](example_input.png)

```
./tensorflow_chessbot.py --filepath example_input.png
```

Should output something like:

```
(venv) $ ./tensorflow_chessbot.py --filepath example_input.png

--- Prediction on file example_input.png ---
Loading model 'saved_models/frozen_graph.pb'
Model restored.
Closing session.
Per-tile certainty:
[[1. 1. 1. 1. 1. 1. 1. 1.]
 [1. 1. 1. 1. 1. 1. 1. 1.]
 [1. 1. 1. 1. 1. 1. 1. 1.]
 [1. 1. 1. 1. 1. 1. 1. 1.]
 [1. 1. 1. 1. 1. 1. 1. 1.]
 [1. 1. 1. 1. 1. 1. 1. 1.]
 [1. 1. 1. 1. 1. 1. 1. 1.]
 [1. 1. 1. 1. 1. 1. 1. 1.]]
Certainty range [0.999975 - 1], Avg: 0.999997
---
Predicted FEN: bn4kN/p5bp/1p3npB/3p4/8/5Q2/PPP2PPP/R3R1K1
Final Certainty: 100.0%
```

Which would be ![predicted](http://www.fen-to-image.com/image/60/bn4kN/p5bp/1p3npB/3p4/8/5Q2/PPP2PPP/R3R1K1.png)


## Reddit Bot

[/u/ChessFenBot](https://www.reddit.com/user/ChessFenBot) will automatically reply to [reddit /r/chess](https://www.reddit.com/r/) new topic image posts that contain detectable online chessboard screenshots. A screenshot either ends in `.png`, `.jpg`, `.gif`, or is an `imgur` link.

It replies with a [lichess](http://www.lichess.org) analysis link for that layout and a predicted [FEN](https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation).

```py
predictor = ChessboardPredictor()
fen, certainty = predictor.makePrediction('http://imgur.com/u4zF5Hj.png')
print("Predicted FEN: %s" % fen)
print("Certainty: %.1f%%" % (certainty*100))
```

```
Certainty range [0.999545 - 1], Avg: 0.999977, Overall: 0.998546
Predicted FEN: 8/5p2/5k1P/2p4P/1p1p4/8/3K4/8
Certainty: 99.9%
Done
[Finished in 1.8s]
```

ChessFenBot automatically replied to [this reddit post](https://www.reddit.com/r/chess/comments/45osos/very_difficult_find_the_best_move_for_white/d004cg6?context=3); it processed the [screenshot link url](http://i.imgur.com/HnWYt8A.png) and responded with:

> ChessFenBot [◕ _ ◕]<sup>\* *I make FENs*</sup>
>
> ---
>
> I attempted to generate a chessboard layout from the posted image, with an overall certainty of **99.9916%**.
>
> FEN: [1nkr4/1p3q1p/pP4pn/P1r5/3N1p2/2b2B1P/5PPB/2RQ1RK1](http://www.fen-to-image.com/image/30/1nkr1111/1p111q1p/pP1111pn/P1r11111/111N1p11/11b11B1P/11111PPB/11RQ1RK1.png)
>
> Here is a link to a [Lichess Analysis](http://www.lichess.org/analysis/1nkr4/1p3q1p/pP4pn/P1r5/3N1p2/2b2B1P/5PPB/2RQ1RK1_w) - White to play
>
> ---
>
> <sup>Yes I am a machine learning bot | [`How I work`](https://github.com/Elucidation/tensorflow_chessbot 'Must go deeper') | Reply with a corrected FEN or [Editor link](http://www.lichess.org/editor/r1b1r1k1/5pp1/p1pR1nNp/8/2B5/2q5/P1P1Q1PP/5R1K) to add to my next training dataset</sup>

## Running with Docker

Automated build on Docker available at `elucidation/tensorflow_chessbot`

Populate your own `auth_config.py` which has the form

```py
USERNAME='<NAME>'
PASSWORD='<PASSWORD>'
USER_AGENT='<AGENT INFO>'
```


Then you can download and run the docker image passing this config file using:

```
docker run -dt --rm --name cfb -v <local_auth_file>:/tcb/auth_config.py elucidation/tensorflow_chessbot
```
chessfenbot/requirements.txt
ADDED
@@ -0,0 +1,7 @@
python=3.7
beautifulsoup4>=4.6.3,<5
lxml>=4.2.4,<5
Pillow>=5.2.0,<6
tensorflow>=1.5.0,<2
requests<3,>=2.21.0
gradio==3.0.19
chessfenbot/run_chessbot.sh
ADDED
@@ -0,0 +1,4 @@
#!/bin/bash
echo "Currently in `pwd`"
log_name=`date +"%F_%H-%M-%S"`
python -u ./chessbot.py > ./out_$log_name.log 2> ./error_$log_name.log
chessfenbot/save_graph.py
ADDED
@@ -0,0 +1,111 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Generate graph.pb and graph.pbtxt

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # Ignore Tensorflow INFO debug messages
import tensorflow as tf
import numpy as np

# Output graph to the same directory as the checkpoint.
output_graph = "saved_models/graph.pb"
output_graphtxt = ('saved_models', 'graph.pbtxt')

# Set up a fresh session and create the model and load it from the saved checkpoint.
tf.reset_default_graph()  # clear out graph.
sess = tf.Session()

model_path = 'saved_models/model_10000.ckpt'

def weight_variable(shape, name=""):
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial, name=name)

def bias_variable(shape, name=""):
  initial = tf.constant(0.1, shape=shape)
  return tf.Variable(initial, name=name)

def conv2d(x, W):
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x, name=""):
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME', name=name)

x = tf.placeholder(tf.float32, [None, 32*32], 'Input')

# First layer : 32 features
W_conv1 = weight_variable([5, 5, 1, 32], name='W1')
b_conv1 = bias_variable([32], name='B1')

x_image = tf.reshape(x, [-1, 32, 32, 1])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1, name='Conv1')
h_pool1 = max_pool_2x2(h_conv1, name='Pool1')

# Second convolutional layer : 64 features
W_conv2 = weight_variable([5, 5, 32, 64], name='W2')
b_conv2 = bias_variable([64], name='B2')

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name='Conv2')
h_pool2 = max_pool_2x2(h_conv2, name='Pool2')

# Densely connected layer : 1024 neurons, image size now 8x8
W_fc1 = weight_variable([8 * 8 * 64, 1024], name='W3')
b_fc1 = bias_variable([1024], name='B3')

h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*64], name='Pool3')
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1, 'MatMult3')

# Dropout
keep_prob = tf.placeholder("float", name='KeepProb')
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name='Drop4')

# Readout layer : softmax, 13 features
W_fc2 = weight_variable([1024, 13], name='W5')
b_fc2 = bias_variable([13], name='B5')

# Probabilities
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name='probabilities')

# Final prediction
prediction = tf.argmax(y_conv, 1, name='prediction')

# Ground truth labels if exist
y_ = tf.placeholder(tf.float32, [None, 13], name='Ytruth')
actual_answer = tf.argmax(y_, 1, name='actual')

cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv), name='CrossEntropy')

# train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(prediction, actual_answer, name='CorrectPrediction')
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"), name='Accuracy')

# Add ops to save and restore all the variables.
saver = tf.train.Saver()

# Restore model from checkpoint
print("\t Loading model '%s'" % model_path)
saver.restore(sess, model_path)
print("\t Model restored.")

# Write graph in text format
tf.train.write_graph(sess.graph_def, output_graphtxt[0], output_graphtxt[1])

# To freeze graph then use:
# python3 -m tensorflow.python.tools.freeze_graph --input_graph graph.pbtxt --input_checkpoint=model_10000.ckpt --input_binary=false --output_graph=actual_frozen.pb --output_node_names=prediction,probabilities

# We also save the binary-encoded graph that may or may not be frozen (TBD) below.
# We use a built-in TF helper to export variables to constants
output_graph_def = tf.graph_util.convert_variables_to_constants(
  sess,  # The session is used to retrieve the weights
  tf.get_default_graph().as_graph_def(),  # The graph_def is used to retrieve the nodes
  ["prediction", "probabilities"]  # The output node names are used to select the useful nodes
)

# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as f:
  f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
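A quick sanity check that the exported graph loads and exposes the two output nodes named during export; a sketch using the same TF1-compat calls this repo uses elsewhere:

```py
import tensorflow as tf

# Parse the GraphDef written by save_graph.py and import it into a fresh graph.
with tf.io.gfile.GFile('saved_models/graph.pb', 'rb') as f:
  graph_def = tf.compat.v1.GraphDef()
  graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
  tf.import_graph_def(graph_def, name='')
  # Both output tensors named at export time should be retrievable.
  graph.get_tensor_by_name('prediction:0')
  graph.get_tensor_by_name('probabilities:0')
print('Exported graph loads OK.')
```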
chessfenbot/saved_models/.DS_Store
ADDED
Binary file (6.15 kB)
chessfenbot/saved_models/cf_v1.0.tflite
ADDED
File without changes
chessfenbot/saved_models/checkpoint
ADDED
@@ -0,0 +1,2 @@
model_checkpoint_path: "saved_models/model_10000.ckpt"
all_model_checkpoint_paths: "saved_models/model_10000.ckpt"
chessfenbot/saved_models/frozen_graph.pb
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4dd6247d76600d3a686ce67ebea0aef382a8feed94a7f5c968feb5eac4add7cf
size 17046610
chessfenbot/saved_models/graph.pb
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f17d70feda7e2c549fc1af23adab6fbb56e57e70583ed1ad855e4bf8e47d1c6b
size 17046610
chessfenbot/saved_models/graph.pbtxt
ADDED
The diff for this file is too large to render.
chessfenbot/saved_models/model_10000.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0b7fa9a030353a743cdcf4fee1a8834fc62f11414464ae8dd474bea0dd804de1
size 51131512
chessfenbot/tensorflow_chessbot.py
ADDED
@@ -0,0 +1,212 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# TensorFlow Chessbot
# This contains ChessboardPredictor, the class responsible for loading and
# running a trained CNN on chessboard screenshots. Used by chessbot.py.
# A CLI interface is provided as well.
#
#   $ ./tensorflow_chessbot.py -h
#   usage: tensorflow_chessbot.py [-h] [--url URL] [--filepath FILEPATH]
#
#    Predict a chessboard FEN from supplied local image link or URL
#
#    optional arguments:
#      -h, --help           show this help message and exit
#      --url URL            URL of image (ex. http://imgur.com/u4zF5Hj.png)
#      --filepath FILEPATH  filepath to image (ex. u4zF5Hj.png)
#
# This file is used by chessbot.py, a Reddit bot that listens on /r/chess for
# posts with an image in it (perhaps checking also for a statement
# "white/black to play" and an image link)
#
# It then takes the image, uses some CV to find a chessboard on it, splits it up
# into a set of images of squares. These are the inputs to the tensorflow CNN
# which will return probability of which piece is on it (or empty)
#
# Dataset will include chessboard squares from chess.com, lichess
# Different styles of each, all the pieces
#
# Generate synthetic data via added noise:
#  * change in coloration
#  * highlighting
#  * occlusion from lines etc.
#
# Take most probable set from TF response, use that to generate a FEN of the
# board, and bot comments on thread with FEN and link to lichess analysis.
#
# A lot of tensorflow code here is heavily adapted from the
# [tensorflow tutorials](https://www.tensorflow.org/versions/0.6.0/tutorials/pdes/index.html)

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # Ignore Tensorflow INFO debug messages
import tensorflow as tf
import numpy as np

from .helper_functions import *
from .helper_image_loading import *
from .chessboard_finder import *
# Module-qualified references below (helper_image_loading.x, chessboard_finder.x)
# need the modules themselves bound as names, not just their contents.
from . import helper_image_loading
from . import chessboard_finder

def load_graph(frozen_graph_filepath):
  # Load and parse the protobuf file to retrieve the unserialized graph_def.
  with tf.io.gfile.GFile(frozen_graph_filepath, "rb") as f:
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())

  # Import graph def and return.
  with tf.Graph().as_default() as graph:
    # Prefix every op/nodes in the graph.
    tf.import_graph_def(graph_def, name="tcb")
  return graph

class ChessboardPredictor(object):
  """ChessboardPredictor using saved model"""
  def __init__(self, frozen_graph_path='./saved_models/frozen_graph.pb'):
    # Restore model using a frozen graph.
    print("\t Loading model '%s'" % frozen_graph_path)
    graph = load_graph(frozen_graph_path)
    self.sess = tf.compat.v1.Session(graph=graph)

    # Connect input/output pipes to model.
    self.x = graph.get_tensor_by_name('tcb/Input:0')
    self.keep_prob = graph.get_tensor_by_name('tcb/KeepProb:0')
    self.prediction = graph.get_tensor_by_name('tcb/prediction:0')
    self.probabilities = graph.get_tensor_by_name('tcb/probabilities:0')
    print("\t Model restored.")

  def getPrediction(self, tiles):
    """Run trained neural network on tiles generated from image"""
    if tiles is None or len(tiles) == 0:
      print("Couldn't parse chessboard")
      return None, 0.0

    # Reshape into Nx1024 rows of input data, format used by neural network
    validation_set = np.swapaxes(np.reshape(tiles, [32*32, 64]), 0, 1)

    # Run neural network on data
    guess_prob, guessed = self.sess.run(
      [self.probabilities, self.prediction],
      feed_dict={self.x: validation_set, self.keep_prob: 1.0})

    # Prediction bounds
    a = np.array(list(map(lambda x: x[0][x[1]], zip(guess_prob, guessed))))
    tile_certainties = a.reshape([8, 8])[::-1, :]

    # Convert guess into FEN string
    # guessed is tiles A1-H8 rank-order, so to make a FEN we just need to flip the files from 1-8 to 8-1
    labelIndex2Name = lambda label_index: ' KQRBNPkqrbnp'[label_index]
    pieceNames = list(map(lambda k: '1' if k == 0 else labelIndex2Name(k), guessed))  # exchange ' ' for '1' for FEN
    fen = '/'.join([''.join(pieceNames[i*8:(i+1)*8]) for i in reversed(range(8))])
    return fen, tile_certainties

  ## Wrapper for chessbot
  def makePrediction(self, url):
    """Try and return a FEN prediction and certainty for URL, return Nones otherwise"""
    img, url = helper_image_loading.loadImageFromURL(url, max_size_bytes=2000000)
    result = [None, None, None]

    # Exit on failure to load image
    if img is None:
      print('Couldn\'t load URL: "%s"' % url)
      return result

    # Resize image if too large
    img = helper_image_loading.resizeAsNeeded(img)

    # Exit on failure if image was too large to resize
    if img is None:
      print('Image too large to resize: "%s"' % url)
      return result

    # Look for chessboard in image, get corners and split chessboard into tiles
    tiles, corners = chessboard_finder.findGrayscaleTilesInImage(img)

    # Exit on failure to find chessboard in image
    if tiles is None:
      print('Couldn\'t find chessboard in image')
      return result

    # Make prediction on input tiles
    fen, tile_certainties = self.getPrediction(tiles)

    # Use the worst case certainty as our final uncertainty score
    certainty = tile_certainties.min()

    # Get visualize link
    visualize_link = helper_image_loading.getVisualizeLink(corners, url)

    # Update result and return
    result = [fen, certainty, visualize_link]
    return result

  def close(self):
    print("Closing session.")
    self.sess.close()

###########################################################
# MAIN CLI

def main(args):
  # Load image from filepath or URL
  if args.filepath:
    # Load image from file
    img = helper_image_loading.loadImageFromPath(args.filepath)
    args.url = None  # Using filepath.
  else:
    img, args.url = helper_image_loading.loadImageFromURL(args.url)

  # Exit on failure to load image
  if img is None:
    raise Exception('Couldn\'t load URL: "%s"' % args.url)

  # Resize image if too large
  # img = helper_image_loading.resizeAsNeeded(img)

  # Look for chessboard in image, get corners and split chessboard into tiles
  tiles, corners = chessboard_finder.findGrayscaleTilesInImage(img)

  # Exit on failure to find chessboard in image
  if tiles is None:
    raise Exception('Couldn\'t find chessboard in image')

  # Create Visualizer url link
  if args.url:
    viz_link = helper_image_loading.getVisualizeLink(corners, args.url)
    print('---\nVisualize tiles link:\n %s\n---' % viz_link)

  if args.url:
    print("\n--- Prediction on url %s ---" % args.url)
  else:
    print("\n--- Prediction on file %s ---" % args.filepath)

  # Initialize predictor, takes a while, but only needed once
  predictor = ChessboardPredictor()
  fen, tile_certainties = predictor.getPrediction(tiles)
  predictor.close()
  if args.unflip:
    fen = unflipFEN(fen)
  short_fen = shortenFEN(fen)
  # Use the worst case certainty as our final uncertainty score
  certainty = tile_certainties.min()

  print('Per-tile certainty:')
  print(tile_certainties)
  print("Certainty range [%g - %g], Avg: %g" % (
    tile_certainties.min(), tile_certainties.max(), tile_certainties.mean()))

  active = args.active
  print("---\nPredicted FEN:\n%s %s - - 0 1" % (short_fen, active))
  print("Final Certainty: %.1f%%" % (certainty*100))

if __name__ == '__main__':
  np.set_printoptions(suppress=True, precision=3)
  import argparse
  parser = argparse.ArgumentParser(description='Predict a chessboard FEN from supplied local image link or URL')
  parser.add_argument('--url', default='http://imgur.com/u4zF5Hj.png', help='URL of image (ex. http://imgur.com/u4zF5Hj.png)')
  parser.add_argument('--filepath', help='filepath to image (ex. u4zF5Hj.png)')
  parser.add_argument('--unflip', default=False, action='store_true', help='revert the image of a flipped chessboard')
  parser.add_argument('--active', default='w')
  args = parser.parse_args()
  main(args)
chessfenbot/tileset_generator.py
ADDED
@@ -0,0 +1,97 @@
#!/usr/bin/env python3
#
# usage: tileset_generator.py [-h] input_folder output_folder

# Generate tile images for all chessboard images in input folder

# positional arguments:
#   input_folder   Input image folder
#   output_folder  Output tile folder

# optional arguments:
#   -h, --help     show this help message and exit

# Pass an input folder and output folder
# Builds tile images for each chessboard image in input folder and puts
# in the output folder
# Used for building training datasets
from chessboard_finder import *
from helper_image_loading import loadImageGrayscale
import argparse
import os
import glob

import numpy as np
import PIL.Image

def saveTiles(tiles, img_save_dir, img_file):
  letters = 'ABCDEFGH'
  if not os.path.exists(img_save_dir):
    os.makedirs(img_save_dir)

  for i in range(64):
    sqr_filename = "%s/%s_%s%d.png" % (img_save_dir, img_file, letters[i % 8], i // 8 + 1)

    # Make resized 32x32 image from matrix and save
    if tiles.shape != (32, 32, 64):
      PIL.Image.fromarray(tiles[:, :, i]) \
        .resize([32, 32], PIL.Image.ADAPTIVE) \
        .save(sqr_filename)
    else:
      # Possibly saving floats 0-1 needs to change fromarray settings
      PIL.Image.fromarray((tiles[:, :, i]*255).astype(np.uint8)) \
        .save(sqr_filename)

def generateTileset(input_chessboard_folder, output_tile_folder):
  # Create output folder as needed
  if not os.path.exists(output_tile_folder):
    os.makedirs(output_tile_folder)

  # Get all image files of type png/jpg/gif
  img_files = set(glob.glob("%s/*.png" % input_chessboard_folder))\
    .union(set(glob.glob("%s/*.jpg" % input_chessboard_folder)))\
    .union(set(glob.glob("%s/*.gif" % input_chessboard_folder)))

  num_success = 0
  num_failed = 0
  num_skipped = 0

  for i, img_path in enumerate(img_files):
    print("#% 3d/%d : %s" % (i+1, len(img_files), img_path))
    # Strip to just filename
    img_file = img_path[len(input_chessboard_folder):-4]

    # Create output save directory or skip this image if it exists
    img_save_dir = "%s/tiles_%s" % (output_tile_folder, img_file)

    if os.path.exists(img_save_dir):
      print("\tSkipping existing")
      num_skipped += 1
      continue

    # Load image
    print("---")
    print("Loading %s..." % img_path)
    img_arr = np.array(loadImageGrayscale(img_path), dtype=np.float32)

    # Get tiles
    print("\tGenerating tiles for %s..." % img_file)
    corners = findChessboardCorners(img_arr)
    tiles = getChessTilesGray(img_arr, corners)

    # Save tiles
    if len(tiles) > 0:
      print("\tSaving tiles %s" % img_file)
      saveTiles(tiles, img_save_dir, img_file)
      num_success += 1
    else:
      print("\tNo Match, skipping")
      num_failed += 1

  print("\t%d/%d generated, %d failures, %d skipped." % (num_success,
    len(img_files) - num_skipped, num_failed, num_skipped))

if __name__ == '__main__':
  np.set_printoptions(suppress=True, precision=2)
  parser = argparse.ArgumentParser(description='Generate tile images for all chessboard images in input folder')
  parser.add_argument('input_folder', metavar='input_folder', type=str,
                      help='Input image folder')
  parser.add_argument('output_folder', metavar='output_folder', type=str,
                      help='Output tile folder')
  args = parser.parse_args()
  generateTileset(args.input_folder, args.output_folder)
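Usage sketch, run from inside the `chessfenbot` directory since the script uses top-level imports (the folder names here are illustrative):

```py
from tileset_generator import generateTileset

# Slice every detected chessboard image under input_boards/ into 64 labeled
# 32x32 tile images under output_tiles/, one subfolder per board image.
generateTileset('input_boards/', 'output_tiles/')
```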
chessfenbot/webkit2png.py
ADDED
@@ -0,0 +1,414 @@
#
#  webkit2png.py
#
#  Creates screenshots of webpages using QtWebKit.
#
#  Copyright (c) 2014 Roland Tapken <roland@dau-sicher.de>
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software
#  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
#
#  Nice ideas "todo":
#   - Add QTcpSocket support to create a "screenshot daemon" that
#     can handle multiple requests at the same time.

import time
import os

from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from PyQt4.QtNetwork import *

# Class for Website-Rendering. Uses QWebPage, which
# requires a running QtGui to work.
class WebkitRenderer(QObject):
  """
  A class that helps to create 'screenshots' of webpages using
  Qt's QWebkit. Requires PyQt4 library.

  Use "render()" to get a 'QImage' object, render_to_bytes() to get the
  resulting image as 'str' object or render_to_file() to write the image
  directly into a 'file' resource.
  """
  def __init__(self, **kwargs):
    """
    Sets default values for the properties.
    """

    if not QApplication.instance():
      raise RuntimeError(self.__class__.__name__ + " requires a running QApplication instance")
    QObject.__init__(self)

    # Initialize default properties
    self.width = kwargs.get('width', 0)
    self.height = kwargs.get('height', 0)
    self.timeout = kwargs.get('timeout', 0)
    self.wait = kwargs.get('wait', 0)
    self.scaleToWidth = kwargs.get('scaleToWidth', 0)
    self.scaleToHeight = kwargs.get('scaleToHeight', 0)
    self.scaleRatio = kwargs.get('scaleRatio', 'keep')
    self.format = kwargs.get('format', 'png')
    self.logger = kwargs.get('logger', None)

    # Set this to true if you want to capture flash.
    # Note that your desktop must be large enough for
    # fitting the whole window.
    self.grabWholeWindow = kwargs.get('grabWholeWindow', False)
    self.renderTransparentBackground = kwargs.get('renderTransparentBackground', False)
    self.ignoreAlert = kwargs.get('ignoreAlert', True)
    self.ignoreConfirm = kwargs.get('ignoreConfirm', True)
    self.ignorePrompt = kwargs.get('ignorePrompt', True)
    self.interruptJavaScript = kwargs.get('interruptJavaScript', True)
    self.encodedUrl = kwargs.get('encodedUrl', False)
    self.cookies = kwargs.get('cookies', [])

    # Set some default options for QWebPage
    self.qWebSettings = {
      QWebSettings.JavascriptEnabled: False,
      QWebSettings.PluginsEnabled: False,
      QWebSettings.PrivateBrowsingEnabled: True,
      QWebSettings.JavascriptCanOpenWindows: False
    }


  def render(self, res):
    """
    Renders the given URL into a QImage object
    """
    # We have to use this helper object because
    # QApplication.processEvents may be called, causing
    # this method to get called while it has not returned yet.
    helper = _WebkitRendererHelper(self)
    helper._window.resize(self.width, self.height)
    image = helper.render(res)

    # Bind helper instance to this image to prevent the
    # object from being cleaned up (and with it the QWebPage, etc)
    # before the data has been used.
    image.helper = helper

    return image

  def render_to_file(self, res, file_object):
    """
    Renders the image into a File resource.
    Returns the size of the data that has been written.
    """
    format = self.format  # this may not be constant due to processEvents()
    image = self.render(res)
    qBuffer = QBuffer()
    image.save(qBuffer, format)
    file_object.write(qBuffer.buffer().data())
    return qBuffer.size()

  def render_to_bytes(self, res):
    """Renders the image into an object of type 'str'"""
    format = self.format  # this may not be constant due to processEvents()
    image = self.render(res)
    qBuffer = QBuffer()
    image.save(qBuffer, format)
    return qBuffer.buffer().data()

## @brief The CookieJar class inherits QNetworkCookieJar to make a couple of functions public.
class CookieJar(QNetworkCookieJar):
  def __init__(self, cookies, qtUrl, parent=None):
    QNetworkCookieJar.__init__(self, parent)
    for cookie in cookies:
      QNetworkCookieJar.setCookiesFromUrl(self, QNetworkCookie.parseCookies(QByteArray(cookie)), qtUrl)

  def allCookies(self):
    return QNetworkCookieJar.allCookies(self)

  def setAllCookies(self, cookieList):
    QNetworkCookieJar.setAllCookies(self, cookieList)

class _WebkitRendererHelper(QObject):
  """
  This helper class is doing the real work. It is required to
  allow WebkitRenderer.render() to be called "asynchronously"
  (but always from Qt's GUI thread).
  """

  def __init__(self, parent):
    """
    Copies the properties from the parent (WebkitRenderer) object,
    creates the required instances of QWebPage, QWebView and QMainWindow
    and registers some Slots.
    """
    QObject.__init__(self)

    # Copy properties from parent
    for key, value in parent.__dict__.items():
      setattr(self, key, value)

    # Determine Proxy settings
    proxy = QNetworkProxy(QNetworkProxy.NoProxy)
    if 'http_proxy' in os.environ:
      proxy_url = QUrl(os.environ['http_proxy'])
      if str(proxy_url.scheme()).startswith('http'):
        protocol = QNetworkProxy.HttpProxy
      else:
        protocol = QNetworkProxy.Socks5Proxy

      proxy = QNetworkProxy(
        protocol,
        proxy_url.host(),
        proxy_url.port(),
        proxy_url.userName(),
        proxy_url.password()
      )

    # Create and connect required PyQt4 objects
    self._page = CustomWebPage(logger=self.logger, ignore_alert=self.ignoreAlert,
                               ignore_confirm=self.ignoreConfirm, ignore_prompt=self.ignorePrompt,
                               interrupt_js=self.interruptJavaScript)
    self._page.networkAccessManager().setProxy(proxy)
    self._view = QWebView()
    self._view.setPage(self._page)
    self._window = QMainWindow()
    self._window.setCentralWidget(self._view)

    # Import QWebSettings
    for key, value in self.qWebSettings.items():
      self._page.settings().setAttribute(key, value)

    # Connect required event listeners
    self.connect(self._page, SIGNAL("loadFinished(bool)"), self._on_load_finished)
    self.connect(self._page, SIGNAL("loadStarted()"), self._on_load_started)
    self.connect(self._page.networkAccessManager(), SIGNAL("sslErrors(QNetworkReply *,const QList<QSslError>&)"), self._on_ssl_errors)
    self.connect(self._page.networkAccessManager(), SIGNAL("finished(QNetworkReply *)"), self._on_each_reply)

    # The way we will use this, it seems to be unnecessary to have Scrollbars enabled
    self._page.mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
    self._page.mainFrame().setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff)
    self._page.settings().setUserStyleSheetUrl(QUrl("data:text/css,html,body{overflow-y:hidden !important;}"))

    # Show this widget
    self._window.show()

  def __del__(self):
    """
    Clean up Qt4 objects.
    """
    self._window.close()
    del self._window
    del self._view
    del self._page

  def render(self, res):
    """
    The real worker. Loads the page (_load_page) and awaits
    the end of the given 'delay'. While it is waiting outstanding
    QApplication events are processed.
    After the given delay, the Window or Widget (depends
    on the value of 'grabWholeWindow') is drawn into a QPixmap
    and postprocessed (_post_process_image).
    """
    self._load_page(res, self.width, self.height, self.timeout)
    # Wait for end of timer. In this time, process
    # other outstanding Qt events.
    if self.wait > 0:
      if self.logger: self.logger.debug("Waiting %d seconds " % self.wait)
      waitToTime = time.time() + self.wait
      while time.time() < waitToTime:
        if QApplication.hasPendingEvents():
          QApplication.processEvents()

    if self.renderTransparentBackground:
      # Another possible drawing solution
      image = QImage(self._page.viewportSize(), QImage.Format_ARGB32)
      image.fill(QColor(255, 0, 0, 0).rgba())

      # http://ariya.blogspot.com/2009/04/transparent-qwebview-and-qwebpage.html
      palette = self._view.palette()
      palette.setBrush(QPalette.Base, Qt.transparent)
      self._page.setPalette(palette)
      self._view.setAttribute(Qt.WA_OpaquePaintEvent, False)

      painter = QPainter(image)
      painter.setBackgroundMode(Qt.TransparentMode)
      self._page.mainFrame().render(painter)
      painter.end()
    else:
      if self.grabWholeWindow:
        # Note that this does not fully ensure that the
        # window still has the focus when the screen is
        # grabbed. This might result in a race condition.
        self._view.activateWindow()
        image = QPixmap.grabWindow(self._window.winId())
      else:
        image = QPixmap.grabWidget(self._window)

    return self._post_process_image(image)

  def _load_page(self, res, width, height, timeout):
    """
    This method implements the logic for retrieving and displaying
    the requested page.
    """

    # This is an event-based application. So we have to wait until
    # "loadFinished(bool)" raised.
    cancelAt = time.time() + timeout
    self.__loading = True
    self.__loading_result = False  # Default

    # When "res" is of type tuple, it has two elements where the first
    # element is the HTML code to render and the second element is a string
    # setting the base URL for the interpreted HTML code.
    # When resource is of type str or unicode, it is handled as URL which
    # shall be loaded
    if type(res) == tuple:
      url = res[1]
    else:
      url = res

    if self.encodedUrl:
      qtUrl = QUrl.fromEncoded(url)
    else:
      qtUrl = QUrl(url)

    # Set the required cookies, if any
    self.cookieJar = CookieJar(self.cookies, qtUrl)
    self._page.networkAccessManager().setCookieJar(self.cookieJar)

    # Load the page
    if type(res) == tuple:
      self._page.mainFrame().setHtml(res[0], qtUrl)  # HTML, baseUrl
    else:
      self._page.mainFrame().load(qtUrl)

    while self.__loading:
      if timeout > 0 and time.time() >= cancelAt:
        raise RuntimeError("Request timed out on %s" % res)
      while QApplication.hasPendingEvents() and self.__loading:
        QCoreApplication.processEvents()

    if self.logger: self.logger.debug("Processing result")

    if self.__loading_result == False:
      if self.logger: self.logger.warning("Failed to load %s" % res)

    # Set initial viewport (the size of the "window")
    size = self._page.mainFrame().contentsSize()
    if self.logger: self.logger.debug("contentsSize: %s", size)
    if width > 0:
      size.setWidth(width)
    if height > 0:
      size.setHeight(height)

    self._window.resize(size)

  def _post_process_image(self, qImage):
    """
    If 'scaleToWidth' or 'scaleToHeight' are set to a value
    greater than zero this method will scale the image
    using the method defined in 'scaleRatio'.
    """
    if self.scaleToWidth > 0 or self.scaleToHeight > 0:
      # Scale this image
      if self.scaleRatio == 'keep':
        ratio = Qt.KeepAspectRatio
      elif self.scaleRatio in ['expand', 'crop']:
        ratio = Qt.KeepAspectRatioByExpanding
      else:  # 'ignore'
        ratio = Qt.IgnoreAspectRatio
      qImage = qImage.scaled(self.scaleToWidth, self.scaleToHeight, ratio, Qt.SmoothTransformation)
      if self.scaleRatio == 'crop':
        qImage = qImage.copy(0, 0, self.scaleToWidth, self.scaleToHeight)
    return qImage

  def _on_each_reply(self, reply):
    """
    Logs each requested uri
    """
    # print "Received %s" % (reply.url().toString())
    # self.logger.debug("Received %s" % (reply.url().toString()))

  # Eventhandler for "loadStarted()" signal
  def _on_load_started(self):
    """
    Slot that sets the '__loading' property to true
    """
    if self.logger: self.logger.debug("loading started")
    self.__loading = True

  # Eventhandler for "loadFinished(bool)" signal
  def _on_load_finished(self, result):
    """Slot that sets the '__loading' property to false and stores
    the result code in '__loading_result'.
    """
    if self.logger: self.logger.debug("loading finished with result %s", result)
    self.__loading = False
    self.__loading_result = result

  # Eventhandler for "sslErrors(QNetworkReply *,const QList<QSslError>&)" signal
  def _on_ssl_errors(self, reply, errors):
    """
    Slot that writes SSL warnings into the log but ignores them.
    """
    for e in errors:
      if self.logger: self.logger.warn("SSL: " + e.errorString())
    reply.ignoreSslErrors()


class CustomWebPage(QWebPage):
  def __init__(self, **kwargs):
    """
    Class Initializer
    """
    super(CustomWebPage, self).__init__()
    self.logger = kwargs.get('logger', None)
    self.ignore_alert = kwargs.get('ignore_alert', True)
    self.ignore_confirm = kwargs.get('ignore_confirm', True)
    self.ignore_prompt = kwargs.get('ignore_prompt', True)
    self.interrupt_js = kwargs.get('interrupt_js', True)

  def javaScriptAlert(self, frame, message):
    if self.logger: self.logger.debug('Alert: %s', message)
    if not self.ignore_alert:
      return super(CustomWebPage, self).javaScriptAlert(frame, message)

  def javaScriptConfirm(self, frame, message):
    if self.logger: self.logger.debug('Confirm: %s', message)
    if not self.ignore_confirm:
      return super(CustomWebPage, self).javaScriptConfirm(frame, message)
    else:
      return False

  def javaScriptPrompt(self, frame, message, default, result):
    """
    This function is called whenever a JavaScript program running inside frame tries to prompt
    the user for input. The program may provide an optional message, msg, as well as a default value
    for the input in defaultValue.

    If the prompt was cancelled by the user the implementation should return false;
    otherwise the result should be written to result and true should be returned.
    If the prompt was not cancelled by the user, the implementation should return true and
    the result string must not be null.
    """
    if self.logger: self.logger.debug('Prompt: %s (%s)' % (message, default))
    if not self.ignore_prompt:
      return super(CustomWebPage, self).javaScriptPrompt(frame, message, default, result)
    else:
      return False

  def shouldInterruptJavaScript(self):
    """
    This function is called when a JavaScript program is running for a long period of time.
    If the user wanted to stop the JavaScript the implementation should return true; otherwise false.
    """
    if self.logger: self.logger.debug("WebKit ask to interrupt JavaScript")
    return self.interrupt_js
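A sketch of driving `WebkitRenderer` directly (a running QApplication is required, per the class's own check; this assumes the event pumping inside `render()` suffices without a full `app.exec_()` loop, and the URL is the lichess editor used elsewhere in this repo):

```py
import sys
from PyQt4.QtGui import QApplication
from chessfenbot.webkit2png import WebkitRenderer

app = QApplication(sys.argv)  # WebkitRenderer refuses to construct without one
renderer = WebkitRenderer(width=1024, height=768, wait=1)
with open('page.png', 'wb') as f:
  renderer.render_to_file(res='http://en.lichess.org/editor/', file_object=f)
```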
requirements.txt
ADDED
@@ -0,0 +1,4 @@
python=3.7
-r chessfenbot/requirements.txt
gradio==3.0.19