# scnets.py

from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout
from keras.layers import Reshape, UpSampling1D, Conv1D
from keras.layers import Flatten, Activation
from keras.utils import np_utils, multi_gpu_model
from keras.regularizers import l2
from keras.wrappers.scikit_learn import KerasRegressor
from keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt

# mean relative error (%) in NumPy, used to test performance on the test set
def calc_mre(y_true, y_pred):
    y_err = 100*np.abs(y_true - y_pred)/y_true
    return np.mean(y_err)

# same metric on Keras tensors, so it can be tracked during training
def calc_mre_K(y_true, y_pred):
    y_err = 100*K.abs(y_true - y_pred)/y_true
    return K.mean(y_err)

# naive percentage loss: sum of relative errors over the output vector
def relerr_loss(y_true, y_pred):
    y_err = K.abs(y_true - y_pred)/y_true
    y_err_f = K.flatten(y_err)
    return K.sum(y_err_f)
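
# Quick sanity check (a sketch with made-up numbers, not project data): a
# prediction that is uniformly 10% high has a mean relative error of 10%.
#   y_true = np.array([1.0, 2.0, 4.0])
#   calc_mre(y_true, 1.1*y_true)   # -> 10.0 (percent)
# relerr_loss on the same pair sums the fractional errors instead of averaging
# percentages, so its magnitude grows with the length of the output vector.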

def conv1dmodel(in_size=8, out_size=256, ker_size=3):
    # create model: Dense projection, then reshape to (4, 64) and
    # upsample/convolve back to out_size points
    # (note: the Reshape assumes out_size == 256, since 4*64 = 256)
    model = Sequential()
    model.add(Dense(out_size, input_dim=in_size,
                    kernel_initializer='normal',
                    name='first'))
    model.add(Activation('relu'))
    model.add(Reshape((4, 64), name='Reshape1'))
    model.add(UpSampling1D(size=2, name='Up1'))
    model.add(Conv1D(filters=64,
                     kernel_size=ker_size, strides=1, padding='same',
                     dilation_rate=1, name='Conv1',
                     kernel_initializer='normal'))
    model.add(Activation('relu'))
    model.add(Conv1D(filters=32,
                     kernel_size=ker_size, strides=1, padding='same',
                     dilation_rate=1, name='Conv2',
                     kernel_initializer='normal'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.compile(loss=relerr_loss, optimizer='adam', metrics=['accuracy', calc_mre_K])
    return model
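
# Usage sketch (shape bookkeeping only): Dense emits out_size=256 values,
# Reshape gives (4, 64), UpSampling1D doubles the length to (8, 64), and both
# 'same'-padded Conv1D layers preserve length, so Flatten returns 8*32 = 256.
#   model = conv1dmodel(in_size=8, out_size=256, ker_size=3)
#   model.summary()   # final output shape: (None, 256)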

def fullycon(in_size=8, out_size=250, N_hidden=3, N_neurons=250, N_gpus=1):
    """
    Returns a fully-connected model which will take a normalized size vector
    and return a spectrum.
    in_size: length of the size vector
    out_size: length of the spectrum vector
    N_hidden: number of hidden layers
    N_neurons: number of neurons in each of the hidden layers
    N_gpus: number of GPUs to compile for (values > 1 use the 'context'
        argument of the MXNet backend of Keras)
    """
    model = Sequential()
    model.add(Dense(out_size, input_dim=in_size, kernel_initializer='normal',
                    activation='relu', name='first'))
    for h in np.arange(N_hidden):
        lname = "H" + str(h)
        model.add(Dense(N_neurons, kernel_initializer='normal',
                        activation='relu', name=lname))
    model.add(Dense(out_size, kernel_initializer='normal', name='last'))
    # Compile model
    if N_gpus == 1:
        model.compile(loss=relerr_loss, optimizer='adam', metrics=['accuracy'])
    else:
        # 'context' is specific to the MXNet backend of Keras (keras-mxnet)
        gpu_list = ["gpu(%d)" % i for i in range(N_gpus)]
        model.compile(loss=relerr_loss, optimizer='adam', metrics=['accuracy'],
                      context=gpu_list)
    return model
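
# Usage sketch with the defaults: an 8-component size vector mapped to a
# 250-point spectrum through 3 hidden layers of 250 neurons each.
#   model = fullycon(in_size=8, out_size=250, N_hidden=3, N_neurons=250)
#   model.summary()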

# staging area for new models
def plot_training_history(history, red_factor):
    loss, val_loss = history.history['loss'], history.history['val_loss']
    loss = np.asarray(loss)/red_factor
    val_loss = np.asarray(val_loss)/red_factor
    epochs = len(loss)
    fig, axs = plt.subplots(1, 1, figsize=(5, 5))
    axs.semilogy(np.arange(1, epochs + 1), loss, label='train error')
    axs.semilogy(np.arange(1, epochs + 1), val_loss, label='validation error')
    axs.set_xlabel('Epoch number')
    axs.set_ylabel('Mean Relative Error (MRE) (%)')
    axs.legend(loc="best")
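
if __name__ == "__main__":
    # End-to-end sketch on synthetic data (the uniform X/y below are stand-ins,
    # not the original dataset). relerr_loss sums relative-error fractions over
    # the out_size points of each sample, so red_factor = out_size/100 (here
    # 2.5) rescales the logged loss to a mean percentage, matching the y-label.
    X = np.random.uniform(0.5, 1.0, size=(128, 8))
    y = np.random.uniform(0.5, 1.0, size=(128, 250))
    model = fullycon(in_size=8, out_size=250)
    history = model.fit(X, y, validation_split=0.2, epochs=5,
                        batch_size=32, verbose=0)
    plot_training_history(history, red_factor=250/100)
    plt.show()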