eval.py: Deploying the classifier on the testing dataset
"""Evaluate a trained CaffeNet classifier on a test LMDB.

Loads the test images and labels, runs them through the network in batches,
computes the overall accuracy and a per-class confusion matrix, and writes
the results to a text file and a CSV file.
"""
from __future__ import print_function
import lmdb
import caffe
import numpy as np
import csv
__author__ = 'PedroMorgado'
def load_lmdb(fn):
    """Read every (image, label) pair from a Caffe LMDB into Python lists."""
    env = lmdb.open(fn, readonly=True)
    datum = caffe.proto.caffe_pb2.Datum()
    with env.begin() as txn:
        cursor = txn.cursor()
        data, labels = [], []
        for _, value in cursor:
            datum.ParseFromString(value)
            labels.append(datum.label)
            data.append(caffe.io.datum_to_array(datum).squeeze())
    env.close()
    return data, labels
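# Illustrative usage (assumes the LMDB stores 3x256x256 images, e.g. as written
# by Caffe's convert_imageset tool; the shapes and labels below are examples only):
#   data, labels = load_lmdb('test1.LMDB')
#   data[0].shape   # -> (3, 256, 256), channel-first BGR
#   labels[:5]      # -> e.g. [0, 1, 1, 0, 0]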
def prep_image(img):
    """Center-crop and mean-subtract one image, mirroring the training transform."""
    img = img.astype(float)[:, 14:241, 14:241]               # center crop (img is in shape [C, H, W])
    img -= np.array([104., 117., 123.]).reshape((3, 1, 1))   # subtract per-channel mean (same as in trainval.prototxt)
    return img
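# Where the 14:241 slice comes from (assuming 256x256 stored images and
# CaffeNet's 227x227 input): the centered offset is (256 - 227) // 2 = 14,
# so 14:241 keeps exactly 227 pixels per spatial dimension. The subtracted
# values are the commonly used ImageNet per-channel means in BGR order.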
def main(test_data, num_class):
    gpu_id = 1  # only takes effect if set_device below is uncommented
    images, labels = load_lmdb(test_data)

    caffe.set_mode_gpu()
    # caffe.set_device(gpu_id)
    deploy = caffe.Net('caffenet/deploy.prototxt', caffe.TEST, weights='model.caffemodel')
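    # Assumptions about the local setup (not guaranteed by the files themselves):
    # - 'caffenet/deploy.prototxt' must declare an input batch dimension of at
    #   least 25, since the loop below copies up to 25 images into the data blob.
    # - caffe.Net(prototxt, phase, weights=...) needs a reasonably recent Caffe;
    #   older builds use the positional form caffe.Net(prototxt, caffemodel, caffe.TEST).
    # - On a machine without a GPU, caffe.set_mode_cpu() can replace set_mode_gpu().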
    probs = []
    for i in range(0, len(images), 25):
        batch = [prep_image(img) for img in images[i:i+25]]
        batch_size = len(batch)
        deploy.blobs['data'].data[:batch_size] = batch
        deploy.forward()
        # np.copy is required: the next forward() call overwrites this blob in place
        probs.append(np.copy(deploy.blobs['prob'].data[:batch_size, :]))
    print('probs list length:', len(probs))
    print('probs element type:', type(probs[0]))
    print(probs[0])
    probs = np.concatenate(probs, 0)
    print('probs shape after concatenate:', probs.shape)
    print(probs[0, :], type(probs[0, 0]))
    # compute accuracy
    predictions = probs.argmax(1)
    gtruth = np.array(labels)
    total_accu = (predictions == gtruth).mean() * 100
    print('predictions shape:', predictions.shape)
    print(predictions[0:25])
    print('Total Accuracy', total_accu)
    # write results to a txt file
    results_txt = open('Image_preds.txt', 'w')
    for i in range(len(gtruth)):
        results_txt.write(str(probs[i, 0]) + ' ' + str(probs[i, 1]))
        results_txt.write(' ')
        results_txt.write(str(predictions[i]))
        results_txt.write('\n')
    results_txt.close()
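    # The two probability columns written above assume a binary classifier
    # (num_class == 2). A class-count-agnostic line would be (sketch, not used here):
    #   results_txt.write(' '.join(str(p) for p in probs[i, :]) + ' ' + str(predictions[i]) + '\n')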
    # compute confusion matrix
    class_count = np.zeros((num_class, 1))         # per-class image counts (one row per class)
    pred_count = np.zeros((num_class, num_class))  # pred_count[i, j] = number of class-i images predicted as class j
    for i in range(len(gtruth)):
        class_count[gtruth[i], 0] += 1.
        pred_count[gtruth[i], predictions[i]] += 1.
    confusion_mat = np.zeros((num_class, num_class))
    for i in range(num_class):
        confusion_mat[i, :] = pred_count[i, :] / class_count[i, 0]
    confusion_mat = np.around(confusion_mat, decimals=4)
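    # Each row of confusion_mat is the prediction distribution for one true class,
    # so row i sums to 1 whenever class i occurs in the test set (an absent class
    # would divide by zero above). If scikit-learn happens to be installed, the raw
    # counts can be cross-checked (optional, not used by this script):
    #   from sklearn.metrics import confusion_matrix
    #   assert (confusion_matrix(gtruth, predictions, labels=list(range(num_class))) == pred_count).all()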
    '''
    print('Prediction Results:')
    print(pred_count)
    print('Confusion Matrix:')
    print(confusion_mat*100)
    '''

    # write summary results to a csv file
    filename = test_data + '_Results.csv'
    outfile = open(filename, 'wb')  # 'wb' suits Python 2's csv module; Python 3 needs 'w' with newline=''
    writer = csv.writer(outfile, delimiter=",")
    writer.writerow(['Class classification on the different specimen'])
    writer.writerow(['Total Accuracy'])
    writer.writerow([str(total_accu)])
    writer.writerow(['Prediction Results:'])
    np.savetxt(outfile, pred_count, delimiter=",")
    writer.writerow(['Confusion Matrix:'])
    np.savetxt(outfile, confusion_mat * 100, delimiter=",")
    print('Wrote results to', filename)
if __name__ == '__main__':
    main('test1.LMDB', 2)
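# Run as a plain script (with pycaffe, lmdb, and numpy importable):
#   python eval.py
# To evaluate another split, point main() at a different LMDB (hypothetical path):
#   main('validation.LMDB', 2)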