
/src/train_predict_keras3.py

https://gitlab.com/tianzhou2011/talkingdata
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import log_loss
import argparse
import logging
import numpy as np
import os
import pandas as pd
import time

from kaggler.data_io import load_data
from const import N_CLASS, SEED

np.random.seed(SEED)

def batch_generator(X, y, batch_size, shuffle):
    # chenglong's code for fitting from a generator
    # (https://www.kaggle.com/c/talkingdata-mobile-user-demographics/forums/t/22567/neural-network-for-sparse-matrices)
    number_of_batches = np.ceil(X.shape[0] / batch_size)
    counter = 0
    sample_index = np.arange(X.shape[0])
    if shuffle:
        np.random.shuffle(sample_index)
    while True:
        batch_index = sample_index[batch_size * counter:batch_size * (counter + 1)]
        X_batch = X[batch_index, :].toarray()
        y_batch = y[batch_index]
        counter += 1
        yield X_batch, y_batch
        if counter == number_of_batches:
            # Reshuffle between epochs, then start over.
            if shuffle:
                np.random.shuffle(sample_index)
            counter = 0
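
# Note on the pattern above: densifying each batch with .toarray() keeps the
# full design matrix sparse in memory and materializes only one dense batch
# at a time, since Keras dense layers cannot consume scipy sparse matrices
# directly (this is the motivation in the forum thread linked above).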

def batch_generatorp(X, batch_size, shuffle):
    # Prediction-time variant: yields feature batches only (no labels) and
    # never shuffles, so predictions stay aligned with the rows of X.
    # Original line had a bug (X.shape[0] / np.ceil(...)); the batch count
    # should simply be the ceiling of rows over batch size.
    number_of_batches = np.ceil(X.shape[0] / batch_size)
    counter = 0
    sample_index = np.arange(X.shape[0])
    while True:
        batch_index = sample_index[batch_size * counter:batch_size * (counter + 1)]
        X_batch = X[batch_index, :].toarray()
        counter += 1
        yield X_batch
        if counter == number_of_batches:
            counter = 0

def baseline_model(dims):
    # create model
    model = Sequential()
    model.add(Dense(150, input_dim=dims, init='normal'))
    model.add(PReLU())
    model.add(Dropout(0.4))
    model.add(Dense(50, init='normal'))
    model.add(PReLU())
    model.add(Dropout(0.2))
    model.add(Dense(12, init='normal', activation='softmax'))
    # Compile model. Categorical cross-entropy is the multiclass log loss,
    # i.e. the competition metric.
    model.compile(loss='categorical_crossentropy', optimizer='adadelta',
                  metrics=['accuracy'])
    return model
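
# The 12-unit softmax output hard-codes the number of TalkingData target
# classes; the imported N_CLASS constant presumably holds the same value,
# though this function does not use it.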

def train_predict(train_file, test_file, predict_valid_file, predict_test_file,
                  cv_id_file, n_est=100, neurons=512, dropout=0.5, batch=16,
                  n_fold=5):
    # Feature set name = training file name minus its 8-character suffix.
    feature_name = os.path.basename(train_file)[:-8]
    # n_est, neurons, dropout, and batch only appear in the model/log name
    # here; the architecture and epoch count below are hard-coded.
    model_name = 'keras3_{}_{}_{}_{}_{}'.format(
        n_est, neurons, dropout, batch, feature_name
    )

    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG,
                        filename='{}.log'.format(model_name))

    logging.info('Loading training and test data...')
    X, y = load_data(train_file)
    Y = np_utils.to_categorical(y)
    X_tst, _ = load_data(test_file)

    nb_classes = Y.shape[1]
    dims = X.shape[1]
    logging.info('{} classes, {} dims'.format(nb_classes, dims))

    logging.info('Loading CV Ids')
    cv_id = np.loadtxt(cv_id_file)

    P_val = np.zeros_like(Y)
    P_tst = np.zeros((X_tst.shape[0], nb_classes))
    for i in range(1, n_fold + 1):
        i_trn = np.where(cv_id != i)[0]
        i_val = np.where(cv_id == i)[0]
        logging.info('Training model #{}'.format(i))
        clf = baseline_model(dims)
        clf.fit_generator(generator=batch_generator(X[i_trn], Y[i_trn], 500, True),
                          nb_epoch=16,
                          samples_per_epoch=69984,
                          validation_data=(X[i_val].todense(), Y[i_val]),
                          verbose=2)
        # Out-of-fold predictions for the held-out fold.
        P_val[i_val] = clf.predict_generator(generator=batch_generatorp(X[i_val], 500, False),
                                             val_samples=X[i_val].shape[0])
        logging.info('CV #{} Log Loss: {:.6f}'.format(i, log_loss(Y[i_val],
                                                                  P_val[i_val])))
        # Average the test predictions across folds.
        P_tst += clf.predict_generator(generator=batch_generatorp(X_tst, 800, False),
                                       val_samples=X_tst.shape[0]) / n_fold

    logging.info('Saving normalized validation predictions...')
    logging.info('CV Log Loss: {:.6f}'.format(log_loss(Y, P_val)))
    np.savetxt(predict_valid_file, P_val, fmt='%.6f', delimiter=',')

    logging.info('Saving normalized test predictions...')
    np.savetxt(predict_test_file, P_tst, fmt='%.6f', delimiter=',')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-file', required=True, dest='train_file')
    parser.add_argument('--test-file', required=True, dest='test_file')
    parser.add_argument('--predict-valid-file', required=True,
                        dest='predict_valid_file')
    parser.add_argument('--predict-test-file', required=True,
                        dest='predict_test_file')
    parser.add_argument('--n-est', default=10, type=int, dest='n_est')
    parser.add_argument('--batch-size', default=64, type=int,
                        dest='batch_size')
    parser.add_argument('--neurons', default=512, type=int)
    parser.add_argument('--dropout', default=0.5, type=float)
    parser.add_argument('--cv-id', required=True, dest='cv_id_file')
    args = parser.parse_args()

    start = time.time()
    train_predict(train_file=args.train_file,
                  test_file=args.test_file,
                  predict_valid_file=args.predict_valid_file,
                  predict_test_file=args.predict_test_file,
                  cv_id_file=args.cv_id_file,
                  n_est=args.n_est,
                  neurons=args.neurons,
                  dropout=args.dropout,
                  batch=args.batch_size)
    logging.info('finished ({:.2f} min elapsed)'.format((time.time() - start) /
                                                        60))
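
As a quick sanity check, here is a minimal sketch of how the generator above
behaves on sparse input. The snippet is not part of the repository: the toy
shapes and values are invented, and it assumes batch_generator is defined as
in the file.

import numpy as np
from scipy import sparse

# Toy data: 10 rows, 3 sparse features, 12 one-hot classes (all invented).
X_demo = sparse.csr_matrix(np.random.rand(10, 3))
y_demo = np.eye(12)[np.random.randint(0, 12, size=10)]

gen = batch_generator(X_demo, y_demo, batch_size=4, shuffle=True)
X_batch, y_batch = next(gen)
print(X_batch.shape, y_batch.shape)  # (4, 3) (4, 12): one dense batch at a time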