In [ ]:
# Force matplotlib to use inline rendering
%matplotlib inline
import os
import sys
# make locally installed libraries (e.g. tensorlight) importable
sys.path.append(os.path.expanduser("~/libs"))
import numpy as np
import tensorlight as light
import tensorflow as tf
In [ ]:
# write a GIF from a list of float images in [0.0, 1.0]
im1 = np.random.rand(64, 64, 1)
im2 = np.random.rand(64, 64, 1)
im3 = np.random.rand(64, 64, 1)
im4 = np.random.rand(64, 64, 1)
images = [im1, im2, im3, im4]
light.utils.video.write_gif("out/gif-anim1-float.gif", images, fps=10)
In [ ]:
# write a GIF from a list of uint8 images in [0, 255]
im1 = np.uint8(np.random.rand(64, 64, 1) * 255)
im2 = np.uint8(np.random.rand(64, 64, 1) * 255)
im3 = np.uint8(np.random.rand(64, 64, 1) * 255)
im4 = np.uint8(np.random.rand(64, 64, 1) * 255)
images = [im1, im2, im3, im4]
light.utils.video.write_gif("out/gif-anim1-uint8.gif", images, fps=10)
In [ ]:
# write a GIF directly from a 4D array of shape (time, height, width, channels)
seq = np.random.rand(8, 64, 64, 3)
light.utils.video.write_gif("out/gif-anim2.gif", seq, fps=10)
In [ ]:
# combine two image lists into one GIF; the lists may differ in length
im1a = np.random.rand(64, 64, 3)
im2a = np.random.rand(64, 64, 3)
im3a = np.random.rand(64, 64, 3)
im4a = np.random.rand(64, 64, 3)
im1b = np.random.rand(64, 64, 3) * 0.5
im2b = np.random.rand(64, 64, 3) * 0.5
im3b = np.random.rand(64, 64, 3) * 0.5
images_a = [im1a, im2a, im3a, im4a]
images_b = [im1b, im2b, im3b]
light.utils.video.write_multi_gif("out/gif-anim3.gif", [images_a, images_b], fps=10)
In [ ]:
# combine three sequences into one GIF, separated by a white 4-pixel border
seq1 = np.random.rand(8, 64, 64, 3) * 1.0
seq2 = np.random.rand(8, 64, 64, 3) * 0.25
seq3 = np.random.rand(8, 64, 64, 3) * 0.5
light.utils.video.write_multi_gif("out/gif-anim4.gif", [seq1, seq2, seq3], fps=10,
                                  pad_value=1.0, pad_width=4)
In [ ]:
# write a list of grayscale float images as a single image-sequence figure
im1 = np.random.rand(64, 64, 1)
im2 = np.random.rand(64, 64, 1)
im3 = np.random.rand(64, 64, 1)
im4 = np.random.rand(64, 64, 1)
images = [im1, im2, im3, im4]
light.utils.video.write_image_sequence("out/img-seq1.png", images)
In [ ]:
# write an image sequence directly from a 4D uint8 array
seq = np.uint8(np.random.rand(8, 64, 64, 3) * 255)
light.utils.video.write_image_sequence("out/img-seq2.png", seq)
In [ ]:
# write two image lists of different lengths as stacked image-sequence rows
im1a = np.random.rand(64, 64, 3)
im2a = np.random.rand(64, 64, 3)
im3a = np.random.rand(64, 64, 3)
im4a = np.random.rand(64, 64, 3)
im1b = np.random.rand(64, 64, 3) * 0.7
im2b = np.random.rand(64, 64, 3) * 0.6
im3b = np.random.rand(64, 64, 3) * 0.5
images_a = [im1a, im2a, im3a, im4a]
images_b = [im1b, im2b, im3b]
light.utils.video.write_multi_image_sequence("out/img-seq3.png", [images_a, images_b])
In [ ]:
# write three 4D sequences of different lengths as stacked rows
seq1 = np.random.rand(6, 64, 64, 3)
seq2 = np.random.rand(7, 64, 64, 3) * 0.5
seq3 = np.random.rand(8, 64, 64, 3)
light.utils.video.write_multi_image_sequence("out/img-seq4.png", [seq1, seq2, seq3])
In [ ]:
# example video assets used below
VIDEO1 = "assets/predicted_moving-mnist.avi"
VIDEO2 = "assets/predicted_ucf11.avi"
VIDEO3 = "assets/v_JumpRope_g01_c01.avi"
In [ ]:
def print_vr_info(vr):
    """Prints the current read state of a VideoReader."""
    print("Length:", vr.frames_length)
    print("Current frame index:", vr.frame_idx)
    print("Frames left:", vr.frames_left)
In [ ]:
# open the video for reading, starting at the first frame
vr = light.utils.video.VideoReader(VIDEO3, start_frame=0)
print_vr_info(vr)
In [ ]:
# read and display the next frame; this advances the frame index
frame = vr.next_frame()
light.visualization.display_array(frame)
print_vr_info(vr)
In [ ]:
# skip ahead by 10 frames
vr.skip_frames(10)
print_vr_info(vr)
In [ ]:
# jump to an absolute frame index
vr.goto_frame(5)
print_vr_info(vr)
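With frames_left and next_frame, the rest of the video can be drained in a plain loop. A small sketch using only the reader attributes shown above; the output filename is illustrative:
In [ ]:
# collect every remaining frame and re-encode a short GIF preview
frames = []
while vr.frames_left > 0:
    frames.append(vr.next_frame())
print("Collected frames:", len(frames))
light.utils.video.write_gif("out/jumprope-preview.gif", frames[:10], fps=10)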
In [ ]:
# free the underlying video resource
vr.release()
In [ ]:
# write 100 random uint8 frames; the context manager releases the writer
with light.utils.video.VideoWriter("out/test.avi") as writer:
    rand_frames = np.random.randint(256, size=(100, 240, 320, 3), dtype=np.uint8)
    writer.write(rand_frames)
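Random noise is hard to eyeball in a player, so a fade-in is easier to verify. The same single write call as above, with an illustrative output path:
In [ ]:
# write a synthetic fade-in: frame t has brightness t / 99
t = np.linspace(0.0, 1.0, 100).reshape(100, 1, 1, 1)
fade = np.uint8(np.ones((100, 240, 320, 3)) * t * 255)
with light.utils.video.VideoWriter("out/fade.avi") as writer:
    writer.write(fade)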
In [ ]:
VIDEO = "assets/v_JumpRope_g01_c01.avi"
SEQ_LENGTH = 30
IMAGE_SIZE = [240, 320, 3]
DO_DISTORTION = False
In [ ]:
# serialize the video into fixed-length raw frame sequences (.seq files)
count, seq_list = light.utils.data.preprocess_videos("assets", "_test", file_list=[VIDEO],
                                                     image_size=IMAGE_SIZE,
                                                     serialized_sequence_length=SEQ_LENGTH)
print("Count:", count)
print("Seq files:", seq_list)
In [ ]:
def _read_record(filename_queue):
    """Reads one serialized frame sequence from the filename queue."""
    class FrameSeqRecord(object):
        pass

    record = FrameSeqRecord()
    record.height = IMAGE_SIZE[0]
    record.width = IMAGE_SIZE[1]
    record.depth = IMAGE_SIZE[2]

    # each .seq file holds exactly SEQ_LENGTH raw uint8 frames
    frame_bytes = record.height * record.width * record.depth
    total_file_bytes = frame_bytes * SEQ_LENGTH

    with tf.name_scope('read_record'):
        reader = tf.FixedLengthRecordReader(total_file_bytes)
        record.key, value = reader.read(filename_queue)
        decoded_record_bytes = tf.decode_raw(value, tf.uint8)
        record.data = tf.reshape(decoded_record_bytes,
                                 [SEQ_LENGTH, record.height, record.width, record.depth])
    return record
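To see the decode-and-reshape step in isolation, here is a self-contained round trip through tf.decode_raw with a toy 2-frame record (all shapes here are made up for the example):
In [ ]:
# round trip: numpy frames -> raw bytes -> decode_raw -> original shape
toy = np.uint8(np.random.rand(2, 4, 4, 3) * 255)
raw = tf.constant(toy.tobytes())          # a scalar string tensor
decoded = tf.decode_raw(raw, tf.uint8)    # 1-D uint8 tensor of length 96
restored = tf.reshape(decoded, [2, 4, 4, 3])
with tf.Session() as sess:
    print("Round trip OK:", np.array_equal(sess.run(restored), toy))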
In [ ]:
def get_batch(batch_size):
    with tf.name_scope('preprocessing'):
        filename_queue = tf.train.string_input_producer(seq_list)
        seq_record = _read_record(filename_queue)

        # convert from uint8 [0, 255] to float32 in [0.0, 1.0]
        seq_data = tf.cast(seq_record.data, tf.float32)
        seq_data = seq_data / 255.0

        if DO_DISTORTION:
            with tf.name_scope('distortion'):
                # apply the same random distortion to every frame of the sequence
                images_to_distort = tf.unpack(seq_data)
                distorted_images = light.image.equal_random_distortion(images_to_distort)
                sequence_inputs = tf.pack(distorted_images, axis=0)
        else:
            sequence_inputs = seq_data

        return light.inputs.generate_batch(sequence_inputs, sequence_inputs,
                                           batch_size,
                                           16, 32,
                                           shuffle=True, num_threads=4)
In [ ]:
# build the input pipeline on the CPU and pull a single batch through it
with tf.device("/cpu:0"):
    bx, _ = get_batch(4)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print("Starting queue runners...")
    x = sess.run(bx)
    coord.request_stop()
    coord.join(threads)

for i in range(x.shape[0]):
    light.visualization.display_batch(x[i], ncols=5, nrows=2, title=str(i))
In [ ]:
# load a serialized .seq file as raw bytes and restore the frame shape
binary_data = light.utils.image.read_as_binary("assets/test_30_240_320_3.seq")
print("Binary shape:", binary_data.shape)
images = np.reshape(binary_data, (-1, 240, 320, 3))
print("Images shape:", images.shape)
light.visualization.display_batch(images, ncols=5, nrows=2)
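The deserialized frames can go straight back into the GIF writer from the first cells; the output name is just for illustration:
In [ ]:
# animate the frames recovered from the .seq file
light.utils.video.write_gif("out/img-from-seq.gif", images, fps=10)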