In [1]:
from keras.preprocessing.video import video_to_array
from work.dataset.activitynet import ActivityNetDataset

VIDEOS_PATH = '/imatge/amontes/work/datasets/ActivityNet/v1.3/videos'
# Dataset wrapper: video metadata (videos.json), class labels and the directory holding the stored .mp4 files.
dataset = ActivityNetDataset(
    videos_path='../dataset/videos.json',
    labels_path='../dataset/labels.txt',
    stored_videos_path=VIDEOS_PATH,
    files_extension='mp4'
)


# Look up a single test video by its YouTube id.
test_video = None
for video in dataset.videos:
    if video.video_id == '7Zd7KlliqQw':
        test_video = video
        break

# Split the video into non-overlapping 16-frame clips (length=16, overlap=0).
instances = test_video.get_video_instances(length=16, overlap=0)

In [2]:
print(len(instances))


196
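
Assuming get_video_instances cuts the video into consecutive, non-overlapping 16-frame clips (which length=16 and overlap=0 suggest), 196 instances implies the video has on the order of 196 * 16 = 3136 usable frames. A quick sanity check under that assumption:

In [ ]:
# Hypothetical check, assuming consecutive non-overlapping 16-frame clips:
# the instance count should be roughly the total frame count divided by 16.
clip_length = 16
approx_frames = len(instances) * clip_length
print(approx_frames)  # 3136 if the assumption above holds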

In [3]:
import time
import numpy as np

path = '/imatge/amontes/work/datasets/ActivityNet/v1.3/videos/7Zd7KlliqQw.mp4'
batch_size = 32
t1 = time.time()
# Pre-allocate the batch: (batch_size, channels, frames, height, width).
bX = np.zeros((batch_size, 3, 16, 112, 112))
for i in range(batch_size):
    instance = instances[i]
    # Load one 16-frame clip, resized to 112x112, starting at this instance's first frame.
    x = video_to_array(path, resize=(112, 112), start_frame=instance.start_frame, length=16)
    bX[i] = x.astype(np.float32)

print(bX.shape)
t2 = time.time()
print('Lasted {} seconds.'.format(t2 - t1))


(32, 3, 16, 112, 112)
Lasted 16.8919019699 seconds.
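
Each iteration above passes the file path to video_to_array, so the video is re-opened for every 16-frame clip, which likely explains the ~17 s for a 32-clip batch (about 0.5 s per clip). A possible alternative, sketched below with OpenCV rather than the project's own loader, is to decode the video once sequentially and slice consecutive 16-frame clips from the frame stream; note that OpenCV yields BGR frames, which may differ from the channel order video_to_array uses.

In [ ]:
# Sketch (assumption, not the project's loader): decode the video once with OpenCV
# and cut consecutive non-overlapping 16-frame clips from the decoded frames.
import cv2

cap = cv2.VideoCapture(path)
frames = []
while len(frames) < batch_size * 16:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, (112, 112))       # OpenCV gives (H, W, 3) BGR frames
    frames.append(frame.transpose(2, 0, 1))     # -> (3, 112, 112)
cap.release()

frames = np.array(frames, dtype=np.float32)     # (n_frames, 3, 112, 112)
n_clips = len(frames) // 16
clips = frames[:n_clips * 16].reshape(n_clips, 16, 3, 112, 112)
clips = clips.transpose(0, 2, 1, 3, 4)          # (n_clips, 3, 16, 112, 112)
print(clips.shape)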

In [ ]:
for i in range(1, 10, 3):
    print(i)
