Set CUDA and Python paths


In [1]:
RCNN_DIR = '/opt/py-faster-rcnn/'

import os
# Note: setting PYTHONPATH here only affects child processes; the running
# kernel's import path is extended via sys.path in the next cell.
os.environ["PYTHONPATH"] = RCNN_DIR + 'caffe-fast-rcnn/python'

!sudo unlink /usr/local/cuda
!sudo ln -s /usr/local/cuda-7.5 /usr/local/cuda
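
As an optional sanity check (a sketch, assuming nvcc from the CUDA 7.5 toolkit is on the PATH), confirm where the symlink now points before loading Caffe:


In [ ]:
import os
print os.path.realpath('/usr/local/cuda')   # expect /usr/local/cuda-7.5
!nvcc --version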

Set import paths for Faster R-CNN


In [2]:
import os.path as osp
import sys

def add_path(path):
    if path not in sys.path:
        sys.path.insert(0, path)
        
caffe_path = osp.join(RCNN_DIR, 'caffe-fast-rcnn', 'python')
add_path(caffe_path)

lib_path = osp.join(RCNN_DIR, 'lib')
add_path(lib_path)
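
A minimal guard, using only the variables defined above, that both directories actually exist; otherwise the Caffe imports in the next cell fail with a less obvious ImportError:


In [ ]:
for p in (caffe_path, lib_path):
    assert osp.isdir(p), 'Path not found: {}'.format(p)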

Import necessary libraries


In [3]:
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2

%matplotlib inline
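
To confirm the imports resolved against the intended checkout (cfg.ROOT_DIR is defined in py-faster-rcnn's lib/fast_rcnn/config.py), print the repository root the config was loaded from:


In [ ]:
print cfg.ROOT_DIR       # should point at /opt/py-faster-rcnn
print cv2.__version__    # OpenCV build used for image I/O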

Define network, classes, GPU, and other parameters


In [4]:
NET = 'vgg16'   # Options: vgg16, zf

GPU = 0   # Set to GPU ID

NUM_IMAGES = 2  # Options: 1 - classify a single image, 2 - classify every .png in IMG_DIR

IMG_DIR = '/home/qtb9744/data/test_images/'  # Set location of test images
CLASS_DIR = '/home/qtb9744/data/test_images/classified/'  # Set location for classified images

FILE_DISPLAY = 1   # Options: 0 - write to file, 1 - display to window

CLASSES = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
           'dog', 'horse', 'motorbike', 'person',
           'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')

COLORS = ('green', 'green', 'yellow', 'green', 'green',
          'green', 'red', 'red', 'green', 'green', 'green', 'green',
          'green', 'green', 'yellow', 'blue',
          'green', 'green', 'green', 'green', 'green')

NETS = {'vgg16': ('VGG16', 'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF', 'ZF_faster_rcnn_final.caffemodel')}
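
Because a missing comma between two strings in a Python tuple silently concatenates them, a quick length check keeps CLASSES and COLORS in sync (both should have 21 entries, background included):


In [ ]:
assert len(CLASSES) == len(COLORS) == 21, (len(CLASSES), len(COLORS))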

Visualize detections made by the network


In [5]:
def vis_detections(im, scores, boxes, im_name, conf_thresh=0.5, 
                   nms_thresh=0.4):
    """Draw detected bounding boxes."""
    im = im[:, :, (2, 1, 0)]  # convert OpenCV's BGR channel order to RGB for matplotlib
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    
    # Font properties for optional score labels (not drawn in this version)
    font0 = FontProperties()
    alignment = {'horizontalalignment': 'center',
                 'verticalalignment': 'baseline'}

    font = font0.copy()
    font.set_weight('bold')
    font.set_size('x-large')

    # Only draw detections for 'car' (index 6) and 'person' (index 14) in
    # CLASSES[1:]; every other class is skipped.
    for cls_ind, cls in enumerate(CLASSES[1:]):
        if cls_ind == 6 or cls_ind == 14:
            cls_ind += 1  # offset into scores/boxes, which include background
            cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack((cls_boxes,
                              cls_scores[:, np.newaxis])).astype(np.float32)
            keep = nms(dets, nms_thresh)
            dets = dets[keep, :]
            inds = np.where(dets[:, -1] >= conf_thresh)[0]

            for i in inds:
                bbox = dets[i, :4]
                ax.add_patch(plt.Rectangle((bbox[0], bbox[1]),
                                           bbox[2] - bbox[0],
                                           bbox[3] - bbox[1],
                                           fill=False,
                                           edgecolor=COLORS[cls_ind],
                                           linewidth=2.0))
            
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
    if FILE_DISPLAY == 0:
        out_file = os.path.join(CLASS_DIR, os.path.basename(im_name))
        plt.savefig(out_file)
    elif FILE_DISPLAY == 1:
        plt.show()
    plt.clf()
    plt.close()
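
To see what the NMS and confidence filtering inside vis_detections do in isolation, here is a toy example (a sketch, assuming the nms extension modules under lib/ are built): of two heavily overlapping boxes, only the higher-scoring one survives an overlap threshold of 0.4.


In [ ]:
toy = np.array([[ 10,  10, 100, 100, 0.9],   # kept (highest score)
                [ 12,  12, 102, 102, 0.6],   # suppressed: IoU with box 0 is ~0.9
                [200, 200, 260, 260, 0.8]],  # kept (no overlap)
               dtype=np.float32)
keep = nms(toy, 0.4)                         # -> [0, 2]
toy = toy[keep, :]
print toy[np.where(toy[:, -1] >= 0.5)[0]]    # rows passing the confidence threshold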

Run demo with image(s)


In [6]:
def demo(net, im_name):
    """Detect object classes in an image using pre-computed object proposals"""
    
    # Load the test image
    im_file = os.path.join(IMG_DIR, im_name)
    im = cv2.imread(im_file)
    
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:3f}s for '
           '{:d} object proposals\n').format(timer.total_time, boxes.shape[0])
    
    # Visualize detections for each class
    CONF_THRESH = 0.7
    NMS_THRESH = 0.3
    vis_detections(im, scores, boxes, im_name, CONF_THRESH, NMS_THRESH)
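
For reference, with 21 classes im_detect returns scores of shape (R, 21) and boxes of shape (R, 84), i.e. one probability and one regressed (x1, y1, x2, y2) per class for each of the R proposals, which is why vis_detections slices boxes[:, 4*cls_ind:4*(cls_ind + 1)]. A quick shape check (a sketch; it needs the net loaded in the next cell):


In [ ]:
dummy = 128 * np.ones((300, 500, 3), dtype=np.uint8)
scores, boxes = im_detect(net, dummy)        # requires `net` from the cell below
print scores.shape, boxes.shape              # e.g. (300, 21) (300, 84)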

Main function


In [7]:
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    
    prototxt = os.path.join(cfg.MODELS_DIR, NETS[NET][0],
                           'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                             NETS[NET][1])
    
    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/scripts/'
                      'fetch_faster_rcnn_models.sh?').format(caffemodel))
        
    caffe.set_mode_gpu()
    caffe.set_device(GPU)
    cfg.GPU_ID = GPU
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    
    print '\n\nLoaded network {:s}'.format(caffemodel)
    
    # Warmup on a dummy image
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _ = im_detect(net, im)
    
    if NUM_IMAGES == 1:
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        IM_NAME = 'test_images1.png'
        demo(net, IM_NAME)  # demo() joins IMG_DIR with the file name itself
    else:
        for im_name in sorted(os.listdir(IMG_DIR)):
            if im_name.endswith('.png'):
                print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
                demo(net, im_name)



Loaded network /opt/py-faster-rcnn/data/faster_rcnn_models/VGG16_faster_rcnn_final.caffemodel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Detection took 0.164214s for 262 object proposals

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Detection took 0.182177s for 300 object proposals

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Detection took 0.165302s for 300 object proposals

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Detection took 0.144684s for 300 object proposals

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Detection took 0.126602s for 300 object proposals
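
If no GPU is available, the same pipeline can be run in CPU mode (markedly slower); a minimal sketch would be to switch Caffe to CPU and disable GPU NMS before loading the network:


In [ ]:
caffe.set_mode_cpu()
cfg.USE_GPU_NMS = False   # fall back to the CPU NMS implementation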


In [ ]: