# scnets.py
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import Dense, Reshape, UpSampling1D, Conv1D
from keras.layers import Flatten, Activation, Input, Add, PReLU
from keras.layers.normalization import BatchNormalization
import numpy as np
import matplotlib.pyplot as plt

# staging area for new models
def plot_training_history(history, red_factor):
    """Plot train/validation loss curves (log scale) from a Keras History object."""
    loss, val_loss = history.history['loss'], history.history['val_loss']
    loss = np.asarray(loss)/red_factor
    val_loss = np.asarray(val_loss)/red_factor
    epochs = len(loss)
    fig, axs = plt.subplots(1, 1, figsize=(5, 5))
    axs.semilogy(np.arange(1, epochs + 1), loss, label='train error')
    axs.semilogy(np.arange(1, epochs + 1), val_loss, label='validation error')
    axs.set_xlabel('Epoch number')
    axs.set_ylabel('Mean Relative Error (MRE) (%)')
    axs.legend(loc="best")
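# Illustrative usage (not part of the original file). `history` is the object
# returned by model.fit(); red_factor rescales the logged loss into MRE
# percent. For relerr_loss defined below, batch_size*out_size/100 is one
# plausible normalization, but the factor actually used is not recorded here.
#
#   history = model.fit(x_train, y_train, validation_split=0.2, epochs=50)
#   plot_training_history(history, red_factor=32*256/100)
#   plt.show()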
# numpy version: evaluate mean relative error (%) on a test set
def calc_mre(y_true, y_pred):
    y_err = 100*np.abs(y_true - y_pred)/y_true
    return np.mean(y_err)

# Keras-tensor version of the same metric, usable during training
def calc_mre_K(y_true, y_pred):
    y_err = 100*K.abs(y_true - y_pred)/y_true
    return K.mean(y_err)

# naive percentage loss: summed relative error over the flattened batch
def relerr_loss(y_true, y_pred):
    y_err = K.abs(y_true - y_pred)/y_true
    y_err_f = K.flatten(y_err)
    return K.sum(y_err_f)
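# Quick sanity check for calc_mre (illustrative, not in the original file):
# with y_true = [1.0, 2.0] and y_pred = [1.1, 1.8] the relative errors are
# 10% and 10%, so the mean relative error is 10%.
#
#   >>> calc_mre(np.array([1.0, 2.0]), np.array([1.1, 1.8]))
#   ~10.0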
def fullycon(in_size=8,
             out_size=256,
             batch_size=32,
             N_hidden=3,
             N_neurons=250,
             N_gpus=1):
    """
    Returns a fully-connected model which takes a normalized size vector and
    returns a spectrum.

    in_size:   length of the size vector
    out_size:  length of the spectrum vector
    N_hidden:  number of hidden layers
    N_neurons: number of neurons in each of the hidden layers
    """
    model = Sequential()
    model.add(Dense(N_neurons, input_dim=in_size,
                    kernel_initializer='normal', activation='relu',
                    name='first'))
    for h in np.arange(N_hidden):
        lname = "H" + str(h)
        model.add(Dense(N_neurons,
                        kernel_initializer='normal', activation='relu', name=lname))
    model.add(Dense(out_size, kernel_initializer='normal', name='last'))
    # Compile model; the `context` argument is specific to the Keras-MXNet
    # backend and distributes training across the listed GPUs.
    if N_gpus == 1:
        model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
    else:
        gpu_list = ["gpu(%d)" % i for i in range(N_gpus)]
        model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K],
                      context=gpu_list)
    return model
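# Illustrative sketch (not in the original file): exercise fullycon() on
# synthetic placeholder data. Shapes follow the defaults (8 inputs, 256
# outputs); the +0.5 offset keeps y_true away from zero, which relerr_loss
# divides by.
def _demo_fullycon():
    x = np.random.rand(1000, 8)
    y = np.random.rand(1000, 256) + 0.5
    model = fullycon(in_size=8, out_size=256, N_hidden=3, N_neurons=250)
    history = model.fit(x, y, batch_size=32, epochs=2,
                        validation_split=0.2, verbose=0)
    return history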
def conv1dmodel(in_size=8,
                out_size=256,
                batch_size=32,
                c1_nf=64,
                clayers=2,
                ker_size=3):
    # create model: Dense front end, reshaped to (4, 64) and upsampled
    # (note: the hard-coded Reshape assumes out_size == 256)
    model = Sequential()
    model.add(Dense(out_size, input_dim=in_size,
                    kernel_initializer='normal',
                    name='first', activation='relu'))
    model.add(Reshape((4, 64), name='Reshape1'))
    model.add(UpSampling1D(size=2, name='Up1'))
    model.add(Conv1D(filters=c1_nf,
                     kernel_size=ker_size, strides=1, padding='same',
                     dilation_rate=1, name='Conv1',
                     kernel_initializer='normal', activation='relu'))
    for cl in np.arange(clayers):
        model.add(Conv1D(filters=32,
                         kernel_size=ker_size,
                         strides=1,
                         padding='same',
                         dilation_rate=1,
                         name='Conv' + str(cl+2),
                         kernel_initializer='normal',
                         activation='relu'))
    model.add(Flatten())
    model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
    return model
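# Shape flow for the default conv1dmodel() (illustrative note, derived from
# the layers above): Dense -> (256,), Reshape -> (4, 64), UpSampling1D ->
# (8, 64), Conv1 -> (8, 64), Conv2/Conv3 -> (8, 32), Flatten -> (256,),
# which matches out_size. To verify:
#
#   conv1dmodel().summary()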
def convprel(in_size=8,
             out_size=256,
             batch_size=32,
             c1_nf=64,
             clayers=2,
             ker_size=3):
    # same topology as conv1dmodel, but with PReLU activations (learnable
    # negative slopes) in place of the fixed ReLU
    model = Sequential()
    model.add(Dense(out_size, input_dim=in_size,
                    kernel_initializer='normal',
                    name='first'))
    model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None))
    model.add(Reshape((4, 64), name='Reshape1'))
    model.add(UpSampling1D(size=2, name='Up1'))
    model.add(Conv1D(filters=c1_nf,
                     kernel_size=ker_size, strides=1, padding='same',
                     dilation_rate=1, name='Conv1',
                     kernel_initializer='normal'))
    model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None))
    for cl in np.arange(clayers):
        model.add(Conv1D(filters=32,
                         kernel_size=ker_size,
                         strides=1,
                         padding='same',
                         dilation_rate=1,
                         name='Conv' + str(cl+2),
                         kernel_initializer='normal'))
        model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None))
    model.add(Flatten())
    model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
    return model
def resblock2(x):
    # plain two-conv residual block; keeps the (steps, 32) shape so the
    # skip connection can be added elementwise
    out = Conv1D(filters=32, kernel_size=3, strides=1, padding='same',
                 dilation_rate=1,
                 kernel_initializer='normal')(x)
    #out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv1D(filters=32, kernel_size=3, strides=1, padding='same',
                 dilation_rate=1,
                 kernel_initializer='normal')(out)
    out = Add()([out, x])
    return out
def resblock(x, ker_size, red_dim):
    # bottleneck residual block: 1x1 conv down to red_dim channels,
    # a ker_size conv at reduced width, then 1x1 conv back up to 32
    # channels so the skip connection matches
    out = Conv1D(filters=red_dim, kernel_size=1, strides=1, padding='same',
                 dilation_rate=1,
                 kernel_initializer='normal')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv1D(filters=red_dim, kernel_size=ker_size, strides=1, padding='same',
                 dilation_rate=1,
                 kernel_initializer='normal')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv1D(filters=32, kernel_size=1, strides=1, padding='same',
                 dilation_rate=1,
                 kernel_initializer='normal')(out)
    out = BatchNormalization()(out)
    out = Add()([out, x])
    out = PReLU(alpha_initializer='zeros', alpha_regularizer=None)(out)
    return out
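# Illustrative check (not in the original file): a resblock maps a
# (steps, 32) tensor to the same shape, so blocks can be stacked freely.
def _demo_resblock():
    a = Input(shape=(8, 32))
    m = Model(inputs=a, outputs=resblock(a, ker_size=3, red_dim=8))
    return m.output_shape   # expected: (None, 8, 32)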
def resnet(in_size=8,
           out_size=256,
           num_units=2,
           red_dim=8,
           batch_size=32,
           ker_size=3):
    # Dense front end to 256 units, reshaped to (8, 32) so that stacked
    # resblocks (which preserve shape) flatten back to out_size == 256
    a = Input(shape=(in_size,))
    first = Dense(256, kernel_initializer='normal')(a)
    first = PReLU(alpha_initializer='zeros', alpha_regularizer=None)(first)
    first = Reshape((8, 32))(first)
    for units in np.arange(num_units):
        first = resblock(first, ker_size, red_dim)
    last = Flatten()(first)
    model = Model(inputs=a, outputs=last)
    # compile model
    model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
    return model
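# Illustrative sketch (not in the original file): build the default resnet
# and map placeholder inputs to spectra to confirm the end-to-end shapes.
def _demo_resnet():
    model = resnet(in_size=8, out_size=256, num_units=2, red_dim=8)
    spectra = model.predict(np.random.rand(4, 8))
    assert spectra.shape == (4, 256)
    return model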