fabsta
11/23/2017 - 6:48 PM

[Kaggle tips] useful Kaggle tips collected along the way #deeplearning

Output (raw predictions, before clipping)

array([[  1.9247e-01,   7.2496e-04,   3.7586e-05,   2.4820e-05,   8.0483e-01,   1.4839e-03,
          3.4440e-06,   4.3349e-04],
       [  7.4949e-02,   2.5567e-04,   9.0141e-05,   2.7097e-04,   3.8967e-01,   8.0172e-04,
          4.2277e-04,   5.3354e-01],
       [  7.3892e-02,   8.5835e-04,   4.3923e-05,   8.5646e-04,   4.6396e-01,   4.9485e-05,
          1.5451e-03,   4.5879e-01],
       [  8.8657e-01,   2.1959e-03,   9.6101e-05,   3.6997e-04,   6.2324e-02,   1.6894e-05,
          3.1924e-05,   4.8398e-02]], dtype=float32)

Code

import numpy as np
import pandas as pd

# Clip predictions into [(1-mx)/7, mx] so no class is predicted with extreme
# confidence, which caps the log-loss penalty when the model is wrong
def do_clip(arr, mx): return np.clip(arr, (1-mx)/7, mx)

preds = bn_model.predict(conv_test_feat, batch_size=batch_size*2)
subm = do_clip(preds, 0.82)
classes = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
submission = pd.DataFrame(subm, columns=classes)
submission.insert(0, 'image', raw_test_filenames)
submission.head()

Output

    image            ALB       BET       DOL       LAG       NoF       OTHER     SHARK     YFT
0   image_04029.jpg  0.192466  0.025714  0.025714  0.025714  0.804826  0.025714  0.025714  0.025714
1   image_11167.jpg  0.074949  0.025714  0.025714  0.025714  0.389672  0.025714  0.025714  0.533538
2   image_06535.jpg  0.073892  0.025714  0.025714  0.025714  0.463964  0.025714  0.025714  0.458791
3   image_06547.jpg  0.820000  0.025714  0.025714  0.025714  0.062324  0.025714  0.025714  0.048398
4   image_03864.jpg  0.820000  0.025714  0.025714  0.025714  0.088146  0.025714  0.025714  0.049266
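Why clip at all: multi-class log loss punishes confidently wrong predictions very hard, so capping each probability at 0.82 and flooring the other seven classes at (1-0.82)/7 ≈ 0.0257 bounds the worst-case penalty per image. A minimal sketch of that effect (illustrative only; worst_case_logloss is a hypothetical helper, not from the notebook):

import numpy as np

# Worst-case log-loss contribution when the true class is given probability p_true
def worst_case_logloss(p_true):
    return -np.log(p_true)

print(worst_case_logloss(1e-5))            # confidently wrong, unclipped: ~11.5
print(worst_case_logloss((1 - 0.82) / 7))  # after clipping to the floor:  ~3.66

The clipped frame is then normally written out with something like submission.to_csv('submission.csv', index=False) before uploading (filename hypothetical).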
import os
# Count the training images available per class
for category in CATEGORIES:
    print('{} {} images'.format(category, len(os.listdir(os.path.join(train_dir, category)))))

Run Extra Epochs

We'll define a simple function for fitting models, just to save a little typing...

In [23]:

def fit_model(model, batches, val_batches, nb_epoch=1):
    # Thin wrapper around Keras 1's fit_generator, using each generator's sample count
    model.fit_generator(batches, samples_per_epoch=batches.nb_sample, nb_epoch=nb_epoch,
                        validation_data=val_batches, nb_val_samples=val_batches.nb_sample)
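For context, batches and val_batches here are assumed to be Keras 1 DirectoryIterators produced by ImageDataGenerator.flow_from_directory, which is what gives them the nb_sample attribute used above. A sketch under that assumption (directory paths and batch size are placeholders):

from keras.preprocessing.image import ImageDataGenerator

gen = ImageDataGenerator()
batches = gen.flow_from_directory('data/train', target_size=(224, 224),
                                  class_mode='categorical', batch_size=64)
val_batches = gen.flow_from_directory('data/valid', target_size=(224, 224),
                                      class_mode='categorical', batch_size=64)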

In [24]:

fit_model(model, batches, val_batches, nb_epoch=1)

Epoch 1/1
23000/23000 [==============================] - 663s - loss: 0.5174 - acc: 0.9671 - val_loss: 0.6724 - val_acc: 0.9570

In [29]:

model.save_weights(model_path+'dogs-cats-redux-model_2.h5')
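If training is resumed in a later session, the checkpoint saved above can be restored with Keras's load_weights (same path as above):

model.load_weights(model_path+'dogs-cats-redux-model_2.h5')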

Change learning rate from 0.1 to 0.01

In [30]:

from keras import backend as K
K.set_value(model.optimizer.lr, 0.01)  # update the lr variable in place; a bare attribute assignment is ignored once the train function is built

In [31]:

fit_model(model, batches, val_batches, nb_epoch=1)

Epoch 1/1
23000/23000 [==============================] - 640s - loss: 0.5174 - acc: 0.9672 - val_loss: 0.3836 - val_acc: 0.9760

In [32]:

#model.save_weights(model_path+'dogs-cats-redux-model_3.h5')

In [ ]:

fit_model(model, batches, val_batches, nb_epoch=1)

import bcolz
# Persist large numpy arrays (e.g. precomputed conv features) to disk and load them back quickly
def save_array(fname, arr): c = bcolz.carray(arr, rootdir=fname, mode='w'); c.flush()
def load_array(fname): return bcolz.open(fname)[:]
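Typical usage is to cache a large precomputed feature array once and reload it quickly in later sessions. A sketch (the .bc path is hypothetical; conv_test_feat is the feature array used earlier):

save_array('conv_test_feat.bc', conv_test_feat)
conv_test_feat = load_array('conv_test_feat.bc')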