In [1]:
# Notebook bootstrap: pull forge helpers into the namespace and initialize
# the production puzzle configuration.
# NOTE(review): star import hides which names come from `forge` — consider
# importing explicitly.
from forge import *
from puzzle.puzzlepedia import prod_config
prod_config.init()
In [2]:
import cv2
import glob
import importlib
import io
import itertools
import numpy as np
import math
import matplotlib.pyplot as plt
from IPython.display import display
from PIL import Image
from ipywidgets import widgets
from os import path
from sklearn.neighbors import NearestNeighbors
from typing import Iterable, Tuple
from data import data
from util import perf
from util.geometry import np2d
from data.image import coloring, component, component_database, image, utils
from puzzle.problems.image import image_problem
from puzzle.steps.image import decompose
# Reload project modules so edits to them are picked up without restarting
# the kernel.
importlib.reload(image)
importlib.reload(np2d)
importlib.reload(utils)
importlib.reload(coloring)
_FILE_PATTERN = '*.png'  # Glob pattern for input images under data/grid.
_MAX = 255  # Maximum 8-bit pixel intensity.
_THRESHOLD = 5  # Intensity threshold used when binarizing grid images.
# Optional allow-list of filenames; empty means "process all images".
# Re-assigned by later cells to focus on specific puzzles.
_FOCUS = {
}
def image_path(name: str, subdir: str = 'original') -> str:
  """Returns the path to a grid image: <project>/data/grid/<subdir>/<name>."""
  grid_root = data.project_path('data/grid')
  return path.join(grid_root, subdir, name)
def get_img(name, subdir: str = 'original'):
  """Reads a grid image from disk as a raw numpy array (OpenCV format)."""
  full_path = image_path(name, subdir=subdir)
  # IMREAD_UNCHANGED preserves any alpha channel rather than forcing BGR.
  return cv2.imread(full_path, flags=cv2.IMREAD_UNCHANGED)
def get_image(name):
  """Loads `name` from disk and wraps the pixels in the project Image type."""
  raw_pixels = get_img(name)
  return image.Image(raw_pixels)
def show(label, img=None):
  """Renders an image inline via an ipywidgets Image widget.

  Callable as show(img) or show(caption, img); the two-argument form prints
  the caption first. Rendering through a widget keeps the PNG bytes out of
  the saved notebook source.
  """
  if img is not None:
    print(label)
  else:
    img = label
  height, width = img.shape[:2]
  buffer = io.BytesIO()
  Image.fromarray(np.array(img, dtype=np.uint8)).save(buffer, 'png')
  display(widgets.Image(
      value=buffer.getvalue(),
      width=width,
      height=height,
  ))
def show_components(g):
  """Displays one image per distinct identified symbol among g's components.

  Args:
    g: object exposing `.components` — presumably a decomposed grid; each
      component is looked up in the ComponentDatabase. Confirm with caller.

  Components with no identified symbol are always shown (the `None` label is
  never treated as "already seen").
  """
  db = component_database.ComponentDatabase()
  seen_symbols = set()
  for comp in g.components:
    symbol = db.identify(comp).labels.get('symbol')
    if symbol is not None and symbol in seen_symbols:
      continue
    seen_symbols.add(symbol)
    show(comp.image)
def imgs(subdir: str = 'original') -> Iterable[Tuple[str, np.ndarray]]:
  """Yields (filename, pixels) for each matching image in data/grid/<subdir>.

  Files are visited in sorted order. When the module-level _FOCUS set is
  non-empty, only filenames listed there are yielded.

  Fixed: the return annotation claimed Iterable[np.ndarray], but the
  generator yields (basename, image) tuples.
  """
  for filename in sorted(glob.glob(image_path(_FILE_PATTERN, subdir))):
    name = path.basename(filename)
    if _FOCUS and name not in _FOCUS:
      continue
    # IMREAD_UNCHANGED preserves any alpha channel.
    yield (
        name,
        cv2.imread(filename, flags=cv2.IMREAD_UNCHANGED))
def images() -> Iterable[Tuple[str, 'image.Image']]:
  """Yields (filename, Image) pairs for every image in data/grid/original.

  Fixed: the return annotation claimed Iterable[image.Image] but the
  generator yields (name, Image) tuples. Also dropped the redundant _FOCUS
  re-check — imgs() already filters on _FOCUS, so the check never fired.
  """
  for name, img in imgs():
    yield name, image.Image(img)
_PADDING = 8  # Pixels of margin kept around studied segments.
def study(*segments, padding=_PADDING):
  """Draws line segments on a black canvas, each in a distinct color.

  Segments are translated so the smallest x/y coordinate sits `padding`
  pixels from the origin, drawn 1px wide, then displayed at 2x scale.

  Args:
    *segments: sequences of two (x, y) endpoints each.
    padding: margin, in pixels, around the drawing.

  Returns:
    The rendered (2x resized) BGR canvas.
  """
  segments = [np.array(s) for s in segments]
  min_x = min(s[:, 0].min() for s in segments)
  min_y = min(s[:, 1].min() for s in segments)
  move_to_zero = np.array([min_x, min_y])
  for s in segments:
    print(repr(s))  # Log the original, untranslated coordinates.
    s -= move_to_zero - padding
  max_x = int(max(s[:, 0].max() for s in segments) + padding)
  max_y = int(max(s[:, 1].max() for s in segments) + padding)
  # Fixed: the local was named `image`, shadowing the imported `image`
  # module used elsewhere in this notebook.
  canvas = np.zeros((max_y + 1, max_x + 1, 3), dtype=np.uint8)
  colors = coloring.colors(len(segments))
  for s, color in zip(segments, colors):
    cv2.line(canvas, tuple(map(int, s[0])), tuple(map(int, s[1])), color.tolist(), 1)
  canvas = cv2.resize(canvas, None, fx=2, fy=2)
  show(canvas)
  return canvas
In [14]:
# Experiment: run the identify_regions debug pipeline over focused images.
from data.image import contours_classifier, coloring, component, component_database, divide_distances_evenly, image, lines_classifier, utils
from puzzle.steps.image import identify_regions
importlib.reload(identify_regions)
importlib.reload(lines_classifier)
importlib.reload(contours_classifier)
# Inject this notebook's show() so those modules render inline.
identify_regions.show = show
contours_classifier.show = show
# Focus set for this cell: only 'nonogram.png' is active; uncomment entries
# to include more puzzles.
_FOCUS = {
  # 'arrow.png',
  # 'askew.png',
  # 'cages.png',
  # 'castlewall.png',
  # 'consecutive.png',
  # 'crossword.png',
  # 'fillomino.png',
  # 'kakuro.png',
  # 'kenken.png',
  'nonogram.png',
  # 'nurimaze.png',
  # 'masyu.png',
  # 'multi.png',
  # 'pentopia.png',
  # 'skyscraper.png',
  # 'pathfinder.png',
  # 'thermo.png',
  # 'rowsgarden.png',
  # 'slitherlink.png',
  # 'spiral.png',
  # 'strimko.png',
  # 'wordsearch.png',
  # 'wordsearch_with_bank.png',  # TODO: Restore this image.
}
for n, i in imgs():
  p = image_problem.ImageProblem(n, i)
  # Reaches into a private step to dump its debug visualizations.
  p._identify_regions._debug()
In [16]:
# Experiment: run the LinesClassifier over focused images and show the first
# line spec's grid masked onto the prepared image.
from data.image import lines_classifier, coloring, component, component_database, divide_distances_evenly, image, utils
from util.geometry import np2d
from puzzle.constraints.image import decompose_constraints
from puzzle.constraints.image import lines_classifier_constraints
from util import perf
importlib.reload(coloring)
importlib.reload(divide_distances_evenly)
importlib.reload(np2d)
#importlib.reload(perf)
importlib.reload(lines_classifier)
importlib.reload(lines_classifier_constraints)
importlib.reload(utils)
importlib.reload(decompose_constraints)
importlib.reload(decompose)
importlib.reload(image)
importlib.reload(image_problem)
# Inject this notebook's rendering hooks into the reloaded modules.
#image.show = show
decompose.show = show
lines_classifier.show = show
lines_classifier.plt = plt
# All entries commented out: process every image (DEBUG stays False below).
_FOCUS = {
  # 'arrow.png',
  # 'askew.png',
  # 'cages.png',
  # 'castlewall.png',
  # 'consecutive.png',
  # 'crossword.png',
  # 'fillomino.png',
  # 'kakuro.png',
  # 'kenken.png',
  # 'nonogram.png',
  # 'nurimaze.png',
  # 'masyu.png',
  # 'multi.png',
  # 'pentopia.png',
  # 'skyscraper.png',
  # 'pathfinder.png',
  # 'thermo.png',
  # 'rowsgarden.png',
  # 'slitherlink.png',
  # 'spiral.png',
  # 'strimko.png',
  # 'wordsearch.png',
  # 'wordsearch_with_bank.png',  # TODO: Restore this image.
}
# Known per-image issues observed in earlier runs:
# Boundary shy: crossword.png, fillomino.png, kakuro.png, masyu.png
# ???: wordsearch.png, kakuro.png, fillomino.png, consecutive.png, castlewall.png
# Skips points: fillomino.png
# Slow: wordsearch.png
# Imprecise: askew.png, kakuro.png, multi.png, rowsgarden.png, wordsearch.png
# Angle clamping: arrow.png, kakuro.png, nonogram.png, rowsgarden.png
# Very slow: wordsearch_with_bank.png.
# Broken with "optimal" fixes: kakuro.png, kenken.png
DEBUG = len(_FOCUS) > 0  # Show debug imagery only when focusing on a subset.
lines_classifier.DEBUG = DEBUG
divide_distances_evenly.DEBUG = DEBUG
for n, i in imgs():
  p = image_problem.ImageProblem(n, i)
  height, width = p._prepare_image.get_result().shape[:2]
  # NOTE(review): `threshold` and `dst` appear unused below — leftovers?
  threshold = int(min(width, height) // 2)
  dst = cv2.cvtColor(p._prepare_image.get_debug_data(), cv2.COLOR_GRAY2BGR)
  prepared_result = p._prepare_image.get_result()
  classified = lines_classifier.LinesClassifier(
      prepared_result, lines_classifier_constraints.LinesClassifierConstraints())
  if DEBUG:
    show(classified.get_debug_data())
  for spec in classified.line_specs():
    if DEBUG:
      show('spec', spec._grid)
    # Mask the detected grid onto the prepared image and display it with
    # the spec's score in the caption.
    combined = prepared_result.fork().mask(spec._grid)
    show('%s (%0.4f%%)' % (n, spec._score), combined.get_debug_data())
    break  # Process just the first one.
print('done')
print(perf.report())
In [57]:
# Recorded (skip, total, skip/total) ratios from previous runs of
# divide_distances_evenly under different tuning parameters (noted in the
# trailing comments). The final line prints the current run's live counters.
print(6749, 157859, 0.04275334317333823)
print(8140, 157859, 0.05156500421262012)
print(112196, 160774, 0.6978491547140707) # round()
print(56510, 159173, 0.3550225226640196) # round(1)
print(20523, 157859, 0.13000842524024606) # round(2)
print(158473, 163358, 0.9700963527957003) # 2, 5%
print(142070, 162483, 0.87436839546291) # 1, 1%
print(131703, 160623, 0.8199510655385592) # 1, 0.5%
print(100931, 158557, 0.6365597230018227) # 1, 0.1%
print(18200, 157908, 0.11525698507991995) # 1, 5/max
print(38757, 157908, 0.24544038300782733) # 2, 5/max
print(45647, 158058, 0.28879904845056875) # 2, 10/max
print(55469, 157908, 0.3512741596372571) # 2, 20/max
print(69179, 159628, 0.43337635001378205) # 2, 40/max
print(99945, 161393, 0.6192647760435707) # 2, 160/max <- bad results
print(83927, 160016, 0.524491300869913) # 2, 80/max <- OK
print(61947, 135562, 0.4569643410395243) # 2, 85/max <- OK
print(59462, 106213, 0.5598373080508036) # scale(2, 50/max) <- weak results
print(46059, 152708, 0.3016148466354088) # 100%, scale(1, 50/max) <- weak results
print(71975, 136118, 0.5287691561733202) # 95%, 1, 85/max
print('%s, %s, %s' % (divide_distances_evenly.skip, divide_distances_evenly.total, divide_distances_evenly.skip / divide_distances_evenly.total))
In [17]:
# Experiment: run the ContoursClassifier over a handful of focused images.
from data.image import contours_classifier, coloring, component, component_database, image, utils
from puzzle.constraints.image import decompose_constraints
importlib.reload(contours_classifier)
importlib.reload(coloring)
importlib.reload(utils)
importlib.reload(decompose_constraints)
importlib.reload(decompose)
importlib.reload(image)
importlib.reload(image_problem)
# Inject this notebook's show() so the classifier renders inline.
contours_classifier.show = show
_FOCUS = {
  'askew.png',
  'arrow.png',
  # 'cages.png',
  # 'castlewall.png',
  # 'fillomino.png',
  # 'kenken.png',
  # 'nonogram.png',
  # 'pathfinder.png',
  'thermo.png',
  # 'rowsgarden.png',
  # 'slitherlink.png',
  # 'spiral.png',
  # 'strimko.png',
}
for n, i in imgs():
  p = image_problem.ImageProblem(n, i)
  #show(p._decompose.get_debug_data())
  src = p._prepare_image.get_result().get_debug_data()
  height, width = src.shape[:2]
  # NOTE(review): `threshold` and `dst` appear unused — leftovers from the
  # commented-out Canny experiment below?
  threshold = int(min(width, height) // 2)
  #show('src', src)
  # Edge detection
  #canny = cv2.Canny(src, 50, 200, None, 3)
  #kernel = np.ones((2, 2))
  #cv2.dilate(canny, kernel, iterations = 1, dst=canny)
  dst = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR)
  classified = contours_classifier.ContoursClassifier()
  classified.classify(image.Image(src))
  #show(classified._tmp_image)
In [ ]:
# WIP source from grid.py:
from data import lazy
class Grid(object):
  """WIP copy of grid extraction from grid.py.

  Incomplete by design: relies on attributes supplied by the original class
  (`grayscale_inv`, `_grayscale_inv_bincount`, `_component_masks`) that are
  not defined here — TODO confirm against grid.py before reuse.
  """

  def grid_without_threshold(self) -> np.ndarray:
    """Accumulates layer/component masks onto the inverted grayscale image.

    Masks paired with color 0 are subtracted (weight -1); all other masks
    are added, emphasizing grid structure over content.
    """
    # TODO: Keep metadata on component positions.
    grayscale = np.copy(self.grayscale_inv)
    for mask, color in itertools.chain(
        self._layer_masks(), self._component_masks(include_inverted=True)):
      if color == 0:
        weight = -1
      else:
        weight = 1
      cv2.addWeighted(grayscale, 1, utils.antialias(mask), weight, 0, dst=grayscale)
    return grayscale

  @lazy.prop
  def grid(self) -> np.ndarray:
    """Binarized grid image, thresholded at _THRESHOLD with strokes preserved."""
    # Fixed: grid_without_threshold is a plain method (no lazy.prop), so it
    # must be called — comparing the bound method to an int raises TypeError.
    src = np.array(
        np.where(self.grid_without_threshold() > _THRESHOLD, _MAX, 0),
        dtype=np.uint8)
    return utils.preserve_stroke(src, _MAX, .9)

  def _layer_masks(self, n: int = 6, show = lambda *x: None) -> Iterable[Tuple[np.ndarray, int]]:
    """Yields (mask, 0) pairs for up to `n` intensity clusters of the image.

    Clusters come from the grayscale histogram (brightest batch first after
    the reversal). `show` is a debug rendering hook.
    """
    src = self.grayscale_inv
    show(src)
    # DO NOT SUBMIT: "show" param.
    batches = list(reversed(list(
        coloring.top_n_color_clusters(self._grayscale_inv_bincount, n))))
    print(batches)
    kernel_size = 5
    kernel = utils.kernel_circle(kernel_size)
    # Normalized kernel used during blurring. Multiply 2x to intensify.
    kernel_normalized = 2 * kernel / np.count_nonzero(kernel)
    forbidden_zone = np.zeros_like(src)
    blocked_next = forbidden_zone
    for batch in batches:
      # Widen the batch's intensity range by the global threshold.
      low, high = batch[0] - _THRESHOLD, batch[-1] + _THRESHOLD
      targeted = np.where(((low < src) & (src < high)), src, 0)
      blocked_count = np.count_nonzero((targeted != 0) & (blocked_next != 0))
      print('blocked count:', blocked_count, 100 * blocked_count / src.size)
      if low > 0 and high < _MAX:
        print('targeted batch %s [%s, %s] (%s)' % (batch, low, high, kernel_size))
        show(targeted)
        show('brighter', np.where(src > high, 255, 0))
        # WARNING: 1.75 is very finely tuned. Any lower and grid lines are
        # removed.
        opened = utils.preserve_stroke(targeted, low, 1.75)
        if not np.any(opened):
          continue
        #opened_percent = 100 * np.count_nonzero(targeted) / opened.size
        #if opened_percent < 1: continue
        #print('opened %.02f' % opened_percent)
        show('opened', opened)
        yield opened, 0
        # NOTE(review): `show` defaults to a lambda, which is always truthy,
        # so this debug branch always runs — confirm whether a None guard
        # was intended.
        if show:
          blurred = cv2.filter2D(
              opened,
              cv2.CV_8UC1,
              kernel_normalized,
              borderType=cv2.BORDER_ISOLATED)
          print('blurred')
          show(blurred)
          subtracted = src - blurred
          result = np.array(np.where(src < blurred, 0, subtracted), dtype=np.uint8)
          show('result', result)
          show('nonzero', np.array(np.where(result > 0, 255, 0), dtype=np.uint8))
      # Mark more territory as forbidden.
      np.maximum(forbidden_zone, targeted, out=forbidden_zone)
      blocked_next = cv2.dilate(forbidden_zone, kernel, iterations=2)
      show('blocked_next', blocked_next)
In [11]:
# Sample data: measured grid-line pixel coordinates from a real puzzle image
# (pairs of close values are the two edges of each grid line), plus an
# idealized evenly-spaced sequence for comparison.
original = [
  0, 5, 44, 47, 87, 90, 129, 134, 171, 175, 214, 222, 261, 264, 304, 307,
  346, 350, 387, 392, 430, 438, 476, 481, 518, 521, 562, 564, 605, 608, 646,
  654, 693, 697, 734, 739, 777, 781, 820, 824, 863, 870, 910, 912, 953, 955,
  993, 998, 1036, 1038, 1079, 1086
]
distances = [i * 5 for i in range(20)]
def convert(distances, window=1):
  """Converts a sorted list of positions into a smoothed 0/1 indicator signal.

  Builds an array spanning 0..max(distances) with 1 at each listed position,
  then box-filters it with a length-`window` averaging kernel (full
  convolution, so the output is len + window - 1 samples).
  """
  marks = frozenset(distances)
  indicator = [1 if i in marks else 0 for i in range(distances[-1] + 1)]
  box = np.full(window, 1 / window)
  return np.convolve(indicator, box)
def plot_distances(*all_distances):
  """Plots each distance list's indicator signal, then their FFT magnitudes.

  Two figures are produced: all indicator signals overlaid, then the
  magnitude spectrum of each signal overlaid.
  """
  converted = []
  for one_list in all_distances:
    signal = convert(one_list)
    converted.append(signal)
    plt.plot(signal)
  plt.show()
  spectra = []
  for signal in converted:
    magnitude = np.abs(np.fft.fft(signal))
    plt.plot(magnitude)
    spectra.append(magnitude)
  plt.show()
# Synthetic inputs for exploring FFT behavior on grid-line spacings:
# pairs 2px apart every 10px, a 9px-offset sequence, and a doubled spacing.
distances = sorted([i * 10 for i in range(20)] + [2 + i * 10 for i in range(20)])
offset = []
offset.extend([i * 10 + 9 for i in range(20)])
doubled = [i * 20 for i in range(10)]
#plot_distances(distances)
#plot_distances(distances, offset, doubled)
#plot_distances(original)
# Observations:
# len(fft) == len(input)
# max(amplitude) = # sum of non-zero inputs
# number of peaks (after x = 0) appears to equal number of zeros between numbers
In [8]:
# Three base ray directions, 60 degrees apart.
THETAS = (0, math.pi / 3, 2 * math.pi / 3)
DEGREE = math.pi / 180  # Radians per degree.
width, height = 100, 100
# Image center. NOTE(review): round() is redundant — // already yields ints.
cx, cy = round(width // 2), round(height // 2)
def distance(width, height, cx, cy, theta):
  """Distance from (cx, cy) along direction theta to the image border.

  Casts a ray from the center and computes how far it travels before
  crossing the vertical or horizontal image boundary, returning the smaller
  of the two. Also logs the computed move for debugging.
  """
  cos_theta = math.cos(theta)
  sin_theta = math.sin(theta)
  # Signed axis-aligned offsets to the border the ray points toward.
  dx = (width - cx) if cos_theta > 0 else -cx
  dy = (height - cy) if sin_theta > 0 else -cy
  # Ray length needed to reach each border; infinite when parallel to it.
  hypotenuse_to_x = dx / cos_theta if cos_theta else float('inf')
  hypotenuse_to_y = dy / sin_theta if sin_theta else float('inf')
  result = min(hypotenuse_to_x, hypotenuse_to_y)
  if theta > (2 * math.pi / 3):
    # Large angles are logged normalized by pi, with the move sign flipped.
    print('%dx%d @ (%d, %d) by %d angle %d' % (
        width, height, cx, cy, -result,
        math.degrees(theta - math.pi)))
  else:
    print('%dx%d @ (%d, %d) by %d angle %d' % (
        width, height, cx, cy, result,
        math.degrees(theta)))
  return result
# Draw rays from the image center in each THETAS direction (plus its
# opposite), skewed 30 degrees, clipped to the border via distance().
img = np.zeros((height, width, 3))
print('image shape is', img.shape)
cv2.circle(img, (cx, cy), 2, (255, 255, 255), thickness=3)
for color, base_theta in zip(coloring.colors(len(THETAS)), THETAS):
  for offset in (0, math.pi):
    theta = base_theta + offset + 30 * DEGREE
    h = distance(width, height, cx, cy, theta)
    dx = round(math.cos(theta) * h)
    dy = round(math.sin(theta) * h)
    # Ray endpoint circled so clipping against the border is visible.
    cv2.line(img, (cx, cy), (cx + dx, cy + dy), color, thickness=1)
    cv2.circle(img, (cx + dx, cy + dy), 4, (255, 255, 255), thickness=1)
show(img)
# Saved log output from an earlier run, kept for reference.
"""
850x989 @ (486, 425) by 29 = [-561, 420]
850x989 @ (486, 425) by 90 = [-425, 564]
850x989 @ (486, 425) by 149 = [-420, 561]
moving (486, 425) by -561 at angle -29
moving (486, 425) by 420 at angle -29
moving (486, 425) by -561 at angle 29
moving (486, 425) by 420 at angle 29
moving (486, 425) by -425 at angle 90
moving (486, 425) by 564 at angle 90
moving (100, 100) by -115 at angle -29
moving (100, 100) by 200 at angle -29
moving (100, 100) by -115 at angle 29
moving (100, 100) by 866 at angle 29
moving (100, 100) by -100 at angle 90
moving (100, 100) by 889 at angle 90
"""
Out[8]:
In [ ]: