python, numpy, optimization, caffe, pycaffe

Caffe feature extraction is too slow? caffe.Classifier or caffe.Net


I have trained a model with images and would now like to extract the fc6 features to .npy files. I'm using caffe.set_mode_gpu() to run caffe.Classifier and extract the features.

Instead of extracting and saving the features per frame, I collect all the features of a folder in a temporary variable and save the result for the complete video to a single .npy file (decreasing the number of write operations to disk).

I have also heard that I could use caffe.Net and pass a batch of images, but I'm not sure what preprocessing has to be done and whether this is faster.

import os
import shutil
import sys
import glob
from multiprocessing import Pool
import numpy as np
import time


def keep_fldrs(path, listr):
    """Keep only the entries in listr that are directories under path."""
    ll = list()
    for x in listr:
        if os.path.isdir(path + x):
            ll.append(x)
    return ll

def keep_img(path, listr):
    """Keep only the entries in listr that are .jpg files under path."""
    ll = list()
    for x in listr:
        if os.path.isfile(path + str(x)) and str(x).endswith('.jpg'):
            ll.append(x)
    return ll

def ifdir(path):
    """Create the directory if it does not already exist."""
    if not os.path.isdir(path):
        os.makedirs(path)

# Main path to your caffe installation
caffe_root = '/home/anilil/projects/lstm/lisa-caffe-public/python'

# Model prototxt file
model_prototxt = '/home/anilil/projects/caffe2tensorflow/deploy_singleFrame.prototxt'

# Model caffemodel file
model_trained = '/home/anilil/projects/caffe2tensorflow/snapshots_singleFrame_flow_v2_iter_55000.caffemodel'
sys.path.insert(0, caffe_root)
import caffe
caffe.set_mode_gpu()
net = caffe.Classifier(model_prototxt, model_trained,
                           mean=np.array([128, 128, 128]),
                           channel_swap=(2,1,0),
                           raw_scale=255,
                           image_dims=(255, 255))

Root = '/media/anilil/Data/Datasets/UCf_scales/ori_mv_vis/Ori_MV/'
Out_fldr = '/media/anilil/Data/Datasets/UCf_scales/ori_mv_vis/feat_fc6/'
allclasses = keep_fldrs(Root, os.listdir(Root))
for classin in allclasses:
    temp_class = Root + classin + '/'
    temp_out_class = Out_fldr + classin + '/'
    ifdir(temp_out_class)
    allvids_folders = keep_fldrs(temp_class, os.listdir(temp_class))
    for each_vid_fldr in allvids_folders:
        temp_pres_dir = temp_class + each_vid_fldr + '/'
        temp_out_pres_dir = temp_out_class + each_vid_fldr + '/'
        ifdir(temp_out_pres_dir)
        all_images = keep_img(temp_pres_dir, os.listdir(temp_pres_dir))
        frameno = 0
        if os.path.isfile(temp_out_pres_dir + 'video.npy'):
            continue
        start = time.time()
        temp_npy = np.ndarray((len(all_images), 4096), dtype=np.float32)
        for each_image in all_images:
            input_image = caffe.io.load_image(temp_pres_dir + each_image)
            prediction = net.predict([input_image], oversample=False)
            temp_npy[frameno, :] = net.blobs['fc6'].data[0]
            frameno = frameno + 1
        np.save(temp_out_pres_dir + 'video.npy', temp_npy)
        end = time.time()
        print("length of imgs {} and time taken is {}".format(len(all_images), (end - start)))
    print('Class {} done'.format(classin))

Output

length of imgs 426 and time taken is 388.539139032
length of imgs 203 and time taken is 185.467905998

Time needed per image is now around 0.9 seconds.


Solution

  • I found the best answer here in this post.

    Until now I had used

    net = caffe.Classifier(model_prototxt, model_trained,
                               mean=np.array([128, 128, 128]),
                               channel_swap=(2,1,0),
                               raw_scale=255,
                               image_dims=(255, 255))
    

    to initialize the model and get the output per image. But this method is really slow, taking around 0.9 seconds per image.

    The better idea is to pass a batch of images (maybe 100, 200, or 250), with the batch size depending on how much memory you have on your GPU (see the chunking sketch at the end of this answer).

    For this I call caffe.set_mode_gpu(), as I have a GPU and it is faster when you send large batches. Initialize the network with your trained model:

    net = caffe.Net(model_prototxt, model_trained, caffe.TEST)
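    # (Not in the original answer) Quick sanity check: the input blob shape
    # (batch, channels, height, width) comes from the deploy prototxt; the
    # Transformer and the reshape below are driven by this same shape.
    print(net.blobs['data'].data.shape)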
    

    Create a Transformer and make sure to set the mean and the other values according to how you trained your model.

    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2,0,1)) # height*width*channel -> channel*height*width
    mean_file = np.array([128, 128, 128])
    transformer.set_mean('data', mean_file) #### subtract mean ####
    transformer.set_raw_scale('data', 255) # pixel value range
    transformer.set_channel_swap('data', (2,1,0)) # RGB -> BGR
    data_blob_shape = net.blobs['data'].data.shape
    data_blob_shape = list(data_blob_shape)
    

    Read a group of images and convert them to the network's input format.

    net.blobs['data'].reshape(len(all_images), data_blob_shape[1], data_blob_shape[2], data_blob_shape[3])
    images = [temp_pres_dir + str(x) for x in all_images]
    net.blobs['data'].data[...] = [transformer.preprocess('data', caffe.io.load_image(x))
                                   for x in images]
    

    Pass the batch of images through the network.

    out = net.forward()
    

    You can use this output as you wish.
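
    Since the goal in the question is to dump fc6 features per video, here is a minimal sketch of how that could look (assuming the blob is named 'fc6' as in the question's code, that all frames of one video fit in a single batch, and reusing temp_out_pres_dir from the question's script):

    # Copy the fc6 activations for the whole batch: shape (len(all_images), 4096)
    feats = net.blobs['fc6'].data.copy()
    # One .npy file per video, as in the original per-frame loop
    np.save(temp_out_pres_dir + 'video.npy', feats.astype(np.float32))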

    Speed for each image is now around 20 ms.
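
    If a video has more frames than fit on the GPU at once, you can process it in fixed-size chunks instead of one giant batch. This is not from the original answer, just a rough sketch of one way to do it: batch_size is a hypothetical value you would tune to your GPU memory, and fc6_dim matches the 4096-wide fc6 layer from the question.

    batch_size = 128   # hypothetical; tune to your GPU memory
    fc6_dim = 4096
    feats = np.zeros((len(images), fc6_dim), dtype=np.float32)
    for start in range(0, len(images), batch_size):
        chunk = images[start:start + batch_size]
        # Resize the input blob to the current chunk and fill it with preprocessed frames
        net.blobs['data'].reshape(len(chunk), data_blob_shape[1],
                                  data_blob_shape[2], data_blob_shape[3])
        net.blobs['data'].data[...] = [transformer.preprocess('data', caffe.io.load_image(x))
                                       for x in chunk]
        net.forward()
        # Collect the fc6 activations for this chunk
        feats[start:start + len(chunk), :] = net.blobs['fc6'].data
    np.save(temp_out_pres_dir + 'video.npy', feats)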