Sucran
9/10/2017 - 12:06 PM

PyCaffe: examining the network

# each output is (batch size, feature dim, spatial dim)
[(k, v.data.shape) for k, v in solver.net.blobs.items()]

#output
#[('data', (64, 1, 28, 28)),
# ('label', (64,)),
# ('conv1', (64, 20, 24, 24)),
# ('pool1', (64, 20, 12, 12)),
# ('conv2', (64, 50, 8, 8)),
# ('pool2', (64, 50, 4, 4)),
# ('fc1', (64, 500)),
# ('score', (64, 10)),
# ('loss', ())]
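
These snippets assume a solver already exists. A minimal setup sketch, assuming the LeNet solver prototxt from Caffe's PyCaffe LeNet tutorial (the path is an assumption and may differ locally):

import caffe

caffe.set_mode_cpu()   # or caffe.set_mode_gpu()
solver = caffe.SGDSolver('mnist/lenet_auto_solver.prototxt')   # assumed path
solver.net.forward()   # run one forward pass so the blobs hold real data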

# just print the weight sizes (we'll omit the biases)
[(k, v[0].data.shape) for k, v in solver.net.params.items()]

#output
#[('conv1', (20, 1, 5, 5)),
# ('conv2', (50, 20, 5, 5)),
# ('fc1', (500, 800)),       # 800 = 50 * 4 * 4, the flattened pool2 output
# ('score', (10, 500))]
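
The plotting code below switches from solver.net to a separate net with 'fc6' and 'prob' blobs, i.e. a deploy-mode classification net rather than the LeNet above. A hedged loading sketch, assuming the BVLC CaffeNet model files (both paths are assumptions):

import caffe

net = caffe.Net('models/bvlc_reference_caffenet/deploy.prototxt',                     # structure
                'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',  # weights
                caffe.TEST)                                                           # inference mode
# After copying a preprocessed image into net.blobs['data'].data, net.forward()
# fills 'fc6', 'prob', and the other blobs inspected below.
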
import matplotlib.pyplot as plt

# 'prob' and 'fc6' belong to a deploy-mode classification net (e.g., CaffeNet),
# not to the LeNet solver.net used earlier.
feat = net.blobs['prob'].data[0]           # class probabilities for the first image
plt.figure(figsize=(15, 3))
plt.plot(feat.flat)

feat = net.blobs['fc6'].data[0]            # raw fc6 activations for the first image
plt.figure(figsize=(15, 3))                # new figure for the fc6 plots
plt.subplot(2, 1, 1)
plt.plot(feat.flat)
plt.subplot(2, 1, 2)
_ = plt.hist(feat.flat[feat.flat > 0], bins=100)   # histogram of positive activations
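
The learned weights can be inspected the same way as activations. A sketch, assuming the LeNet solver from above: tile its 20 single-channel 5x5 conv1 filters (shape (20, 1, 5, 5)) into a 4x5 grid and display them.

import matplotlib.pyplot as plt

w = solver.net.params['conv1'][0].data[:, 0]                     # (20, 5, 5)
grid = w.reshape(4, 5, 5, 5).transpose(0, 2, 1, 3).reshape(20, 25)
plt.imshow(grid, cmap='gray', interpolation='nearest')
plt.axis('off')
plt.show()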

# Print the shape of every blob (activations) and of every layer's
# parameters (weights, then biases).
for layer_name, blob in net.blobs.items():
    print(layer_name + '\t' + str(blob.data.shape))

for layer_name, param in net.params.items():
    print(layer_name + '\t' + str(param[0].data.shape) + ' ' + str(param[1].data.shape))
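
A further sketch along the same lines: take one solver step and compare the argmax of 'score' with the batch labels for a quick training-accuracy check (blob names as listed above).

import numpy as np

solver.step(1)                                               # one SGD iteration
predicted = solver.net.blobs['score'].data.argmax(axis=1)    # (64,) predicted digits
labels = solver.net.blobs['label'].data                      # (64,) ground-truth digits
print('batch accuracy: {:.2f}'.format(np.mean(predicted == labels)))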