In [ ]:
import json
import pathlib

from pikov import JSONGraph
from pikov import sprite

In [ ]:
# Helper for displaying images.

# source: http://nbviewer.ipython.org/gist/deeplook/5162445
from io import BytesIO

from IPython import display
from PIL import Image


def display_pil_image(im):
    """Displayhook function for PIL Images, rendered as PNG."""

    b = BytesIO()
    im.save(b, format='png')
    data = b.getvalue()

    ip_img = display.Image(data=data, format='png', embed=True)
    return ip_img._repr_png_()


# register display func with PNG formatter:
png_formatter = get_ipython().display_formatter.formatters['image/png']
dpi = png_formatter.for_type(Image.Image, display_pil_image)

In [ ]:
sample_dir = (pathlib.Path("..") / ".." / "samples").resolve()

with open(sample_dir / "pikov-core.json") as fp:
    core_types = json.load(fp)
    #graph = JSONGraph.load(fp)

In [ ]:
sample_path = sample_dir / "gamekitty.json"

# Merge the core type definitions into the gamekitty graph.
graph = JSONGraph.load(sample_path)
for key, item in core_types["guidMap"].items():
    graph._guid_map[key] = item

Build names mapping

To make it a little easier to check that I'm using the correct GUIDs, construct a mapping from names back to their nodes, so a name can be resolved to a GUID.

Note: this adds a constraint that no two nodes have the same name, which should not be enforced for general semantic graphs.


In [5]:
names = {}
for node in graph:
    for edge in node:
        # Edges with this GUID hold a node's human-readable name.
        if edge.guid == "169a81aefca74e92b45e3fa03c7021df":
            value = node[edge].value
            if value in names:
                raise ValueError('name: "{}" defined twice'.format(value))
            names[value] = node

names["ctor"]

In [6]:
def name_to_guid(name):
    """Return the GUID of the node with the given name, or None if unknown."""
    if name not in names:
        return None
    node = names[name]
    if not hasattr(node, "guid"):
        return None
    return node.guid

Pikov Classes

These classes are the core resources used in defining a "Pikov" file.

Note: ideally these classes could be derived from the graph itself, but I don't (yet) encode type or field information in the pikov.json semantic graph.
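
To give a feel for what these wrapper classes do, here is a minimal sketch (not the actual pikov.sprite implementation) of a node view: it just holds a reference to the graph plus the node's GUID. The class name and the equality-by-GUID behaviour are assumptions for illustration only.

In [ ]:
# Illustrative sketch only -- not the real pikov.sprite code.
class GraphNodeView:
    """A hypothetical thin view over a semantic-graph node."""

    def __init__(self, graph, guid):
        self._graph = graph  # the JSONGraph the node lives in
        self.guid = guid     # the node's identity within that graph

    # Assumption: comparing by GUID would let wrapped nodes be deduplicated
    # in sets, as find_nodes does further below.
    def __eq__(self, other):
        return isinstance(other, GraphNodeView) and other.guid == self.guid

    def __hash__(self):
        return hash(self.guid)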


In [7]:
from pikov.sprite import Bitmap, Clip, Frame, FrameList, Resource, Transition

Gamekitty

Create instances of the Pikov classes to define a concrete Pikov graph, based on my "gamekitty" animations.

Load the spritesheet

In the previous notebook, we chopped the spritesheet into bitmaps. Find those and collect them into a list so that they can be indexed by sprite number, just as in the original PICO-8 gamekitty doodle.


In [8]:
resource = Resource(graph, guid=name_to_guid("spritesheet"))

spritesheet = []
for row in range(16):
    for column in range(16):
        sprite_number = row * 16 + column
        bitmap_name = "bitmap[{}]".format(sprite_number)
        bitmap = Bitmap(graph, guid=name_to_guid(bitmap_name))
        spritesheet.append(bitmap)

Create frames for each "clip"

Each animation is defined in terms of sprite numbers. Sometimes a clip should loop, but sometimes it's only used as a transition between looping clips.
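
To make "defined in terms of sprite numbers" concrete, the bitmaps for a clip can be pulled straight out of the spritesheet list built above. The sprite numbers here are placeholders for illustration only; the real clips are loaded from the graph by name in the cells below.

In [ ]:
# Placeholder sprite numbers -- not the actual gamekitty frame indices.
example_sprite_numbers = [0, 1, 2, 1]

# Index into the spritesheet list exactly as the PICO-8 doodle indexed sprites.
example_bitmaps = [spritesheet[n] for n in example_sprite_numbers]
len(example_bitmaps)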


In [9]:
def find_nodes(graph, ctor, cls):
    """Find all nodes constructed by ``ctor`` and wrap them in ``cls``."""
    nodes = set()
    # TODO: With graph formats that have indexes, there should be a faster way.
    for node in graph:
        if node[names["ctor"]] == ctor:
            nodes.add(cls(graph, guid=node.guid))
    return nodes


def find_frames(graph):
    return find_nodes(graph, names["frame"], Frame)


def find_transitions(graph):
    return find_nodes(graph, names["transition"], Transition)


def find_absorbing_frames(graph):
    transitions = find_transitions(graph)
    target_frames = set()
    source_frames = set()
    for transition in transitions:
        target_frames.add(transition.target.guid)
        source_frames.add(transition.source.guid)
    return target_frames - source_frames  # In but not out. Dead end!
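
The absorbing-frame check is just a set difference over GUIDs: a frame that appears as a transition target but never as a source can be entered but never left. A toy example with made-up GUID strings shows the idea.

In [ ]:
# Toy illustration of the set difference in find_absorbing_frames,
# using made-up GUID strings.
target_guids = {"frame-a", "frame-b", "frame-c"}
source_guids = {"frame-a", "frame-b"}
target_guids - source_guids  # {'frame-c'}: in, but no way out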

In [10]:
MICROS_12_FPS = int(1e6 / 12)  # duration of one frame at 12 frames per second, in microseconds
MICROS_24_FPS = int(1e6 / 24)  # duration of one frame at 24 frames per second, in microseconds


def connect_frames(graph, transition_name, source, target):
    """Connect the source frame to the target frame with a named transition."""
    transition = Transition(graph, guid=name_to_guid(transition_name))
    transition.name = transition_name
    transition.source = source
    transition.target = target
    return transition

In [11]:
sit = Clip(graph, guid=name_to_guid("clip[sit]"))
sit


Out[11]:
Clip
preview: [animation]

In [13]:
sit_to_stand = Clip(graph, guid=name_to_guid("clip[sit_to_stand]"))
sit_to_stand


Out[13]:
Clip
preview: [animation]

In [15]:
stand_waggle = Clip(graph, guid=name_to_guid("clip[stand_waggle]"))
stand_waggle


Out[15]:
Clip
preview: [animation]

In [16]:
connect_frames(
    graph,
    "transitions[sit_to_stand, stand_waggle]",
    sit_to_stand[-1],
    stand_waggle[0])


Out[16]:
Transition
guid: 406ac656142a45c0a1ff3f1716e84fac
name: transitions[sit_to_stand, stand_waggle]
source.guid: f6a2db33bca64aa799d3690ced24b187
source.image: [image]
target.guid: e78112cca4b847528b93a8decd55db22
target.image: [image]

In [17]:
stand_to_sit = Clip(graph, guid=name_to_guid("clip[stand_to_sit]"))
stand_to_sit


Out[17]:
Clip
preview: [animation]

In [18]:
connect_frames(
    graph,
    "transitions[stand_waggle, stand_to_sit]",
    stand_waggle[-1],
    stand_to_sit[0])
connect_frames(
    graph,
    "transitions[stand_to_sit, sit]",
    stand_to_sit[-1],
    sit[0])


Out[18]:
Transition
guid: b8449c8ee4a84e5bab83d05388bb1a01
name: transitions[stand_to_sit, sit]
source.guid: 7f00ace6eca1457f9eb90cb657708a7c
source.image: [image]
target.guid: 61bf1365bb8b40cbac34d30f7b8c1ac5
target.image: [image]

In [21]:
sit_paw = Clip(graph, guid=name_to_guid("clip[sit_paw]"))
sit_paw


Out[21]:
Clip
preview: [animation]

In [22]:
connect_frames(
    graph,
    "transitions[sit_paw, sit]",
    sit_paw[-1],
    sit[0])
connect_frames(
    graph,
    "transitions[sit, sit_paw]",
    sit[-1],
    sit_paw[0])


Out[22]:
Transition
guid: 1b42a000d15742a1a49e2e6f3e6193be
name: transitions[sit, sit_paw]
source.guid: 61bf1365bb8b40cbac34d30f7b8c1ac5
source.image: [image]
target.guid: ef8dafd0bb294811b1d16653e25db192
target.image: [image]

In [23]:
sit_to_crouch = Clip(graph, guid=name_to_guid("clip[sit_to_crouch]"))
connect_frames(
    graph,
    "transitions[sit, sit_to_crouch]",
    sit[-1],
    sit_to_crouch[0])


Out[23]:
Transition
guid: a4d39c1cca4742ba8d06cae39a6f1ea2
name: transitions[sit, sit_to_crouch]
source.guid: 61bf1365bb8b40cbac34d30f7b8c1ac5
source.image: [image]
target.guid: 92772921661b432988dfaeb4b7be75ed
target.image: [image]

In [24]:
crouch = Clip(graph, guid=name_to_guid("clip[crouch]"))
connect_frames(
    graph,
    "transitions[sit_to_crouch, crouch]",
    sit_to_crouch[-1],
    crouch[0])


Out[24]:
Transition
guid: 40305fe387364850bd5aa34faf0de5f1
name: transitions[sit_to_crouch, crouch]
source.guid: 4567bc3641bb410483b5478f32cf480a
source.image: [image]
target.guid: f0465488f13445b991ec2bbf47f931d8
target.image: [image]

In [26]:
crouch_to_sit = Clip(graph, guid=name_to_guid("clip[crouch_to_sit]"))
connect_frames(
    graph,
    "transitions[crouch_to_sit, sit]",
    crouch[-1],
    crouch_to_sit[0])
connect_frames(
    graph,
    "transitions[crouch_to_sit, sit]",
    crouch_to_sit[-1],
    sit[0])


Out[26]:
Transition
guid: 41dc084c788044a083a963d649a58100
name: transitions[crouch_to_sit, sit]
source.guid: 9fc1ab9be057424b936ecdd10949bca9
source.image: [image]
target.guid: 61bf1365bb8b40cbac34d30f7b8c1ac5
target.image: [image]

In [27]:
find_absorbing_frames(graph)


Out[27]:
set()

In [30]:
graph.save()

In [ ]: