# scnets.py — Keras / keras-mxnet model definitions for spectrum regression.
  1. from keras import backend as K
  2. from keras.models import Sequential, Model
  3. from keras.layers import Dense, Dropout
  4. from keras.layers import Reshape, UpSampling1D, Conv1D
  5. from keras.layers import Flatten, Activation
  6. from keras.utils import np_utils, multi_gpu_model
  7. from keras.regularizers import l2
  8. from keras.wrappers.scikit_learn import KerasRegressor
  9. from keras.optimizers import Adam
  10. import numpy as np
  11. #import matplotlib.pyplot as plt
  12. from keras.layers import PReLU
  13. from keras.models import Model
  14. from keras.layers import Input, Add
  15. from keras.layers.normalization import BatchNormalization
  16. from keras.layers import PReLU
  17. from keras.utils import to_channels_first
  18. # # #staging area for new models
  19. # def plot_training_history(history, red_factor):
  20. # loss, val_loss = history.history['loss'], history.history['val_loss']
  21. # loss = np.asarray(loss)/red_factor
  22. # val_loss = np.asarray(val_loss)/red_factor
  23. # epochs = len(loss)
  24. # fig, axs = plt.subplots(1,1, figsize=(5,5))
  25. # axs.semilogy(np.arange(1, epochs + 1), loss, label='train error')
  26. # axs.semilogy(np.arange(1, epochs + 1), val_loss, label='validation error')
  27. # axs.set_xlabel('Epoch number')
  28. # axs.set_ylabel('Mean Relative Error (MRE) (%)')
  29. # axs.legend(loc="best")
  30. #function to test performance on testset
  31. def calc_mre(y_true, y_pred):
  32. y_err = 100*np.abs(y_true - y_pred)/y_true
  33. return np.mean(y_err)
  34. #function to test performance on testset
  35. def calc_mre_K(y_true, y_pred):
  36. y_err = 100*K.abs(y_true - y_pred)/y_true
  37. return K.mean(y_err)
  38. #naive percentage loss
  39. def relerr_loss(y_true, y_pred):
  40. y_err = K.abs(y_true - y_pred)/y_true
  41. y_err_f = K.flatten(y_err)
  42. return K.sum(y_err_f)
  43. def fullycon( in_size=8,
  44. out_size=256,
  45. batch_size=32,
  46. N_hidden=3,
  47. N_neurons=250,
  48. N_gpus=1):
  49. """
  50. Returns a fully-connected model which will take a normalized size vector and return a
  51. spectrum
  52. in_size: length of the size vector
  53. out_size: length of the spectrum vector
  54. N_hidden: number of hidden layers
  55. N_neurons: number of neurons in each of the hidden layers
  56. """
  57. model = Sequential()
  58. model.add(Dense(N_neurons, input_dim=in_size,
  59. kernel_initializer='normal',
  60. name='first' ))
  61. model.add(Activation('relu'))
  62. for h in np.arange(N_hidden):
  63. lname = "H"+str(h)
  64. model.add(Dense(N_neurons,
  65. kernel_initializer='normal', name=lname ))
  66. model.add(Activation('relu'))
  67. model.add(Dense(out_size, kernel_initializer='normal', name='last'))
  68. # Compile model
  69. if N_gpus == 1:
  70. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
  71. else:
  72. gpu_list = ["gpu(%d)" % i for i in range(N_gpus)]
  73. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K], context = gpu_list)
  74. return model
  75. def conv1dmodel(in_size=8,
  76. out_size=256,
  77. batch_size=32,
  78. c1_nf=64,
  79. clayers=2,
  80. ker_size=3):
  81. # create model
  82. model = Sequential()
  83. model.add(Dense(out_size, input_dim=in_size,
  84. kernel_initializer='normal',
  85. name='first'))
  86. model.add(Activation('relu'))
  87. model.add(Reshape((4, 64), name='Reshape1'))
  88. model.add(UpSampling1D(size=2, name='Up1'))
  89. model.add(Conv1D(filters=c1_nf,
  90. kernel_size=ker_size, strides=1, padding='same',
  91. dilation_rate=1, name='Conv1',
  92. kernel_initializer='normal'))
  93. model.add(Activation('relu'))
  94. for cl in np.arange(clayers):
  95. model.add(Conv1D(filters=32,
  96. kernel_size=ker_size,
  97. strides=1,
  98. padding='same',
  99. dilation_rate=1,
  100. name='Conv'+ str(cl+2),
  101. kernel_initializer='normal'))
  102. model.add(Activation('relu'))
  103. model.add(Flatten())
  104. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
  105. return model
  106. def convprel(in_size=8,
  107. out_size=256,
  108. batch_size=32,
  109. c1_nf=64,
  110. clayers=2,
  111. ker_size=3):
  112. # create model
  113. model = Sequential()
  114. model.add(Dense(out_size, input_dim=in_size,
  115. kernel_initializer='normal',
  116. name='first'))
  117. model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None))
  118. model.add(Reshape((4, 64), name='Reshape1'))
  119. model.add(UpSampling1D(size=2, name='Up1'))
  120. model.add(Conv1D(filters=c1_nf,
  121. kernel_size=ker_size, strides=1, padding='same',
  122. dilation_rate=1, name='Conv1',
  123. kernel_initializer='normal'))
  124. model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None))
  125. for cl in np.arange(clayers):
  126. model.add(Conv1D(filters=32,
  127. kernel_size=ker_size,
  128. strides=1,
  129. padding='same',
  130. dilation_rate=1,
  131. name='Conv'+ str(cl+2),
  132. kernel_initializer='normal'))
  133. model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None))
  134. model.add(Flatten())
  135. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
  136. return model
  137. def resblock2(Input, ker_size):
  138. #Input = to_channels_first(Input)
  139. Output = Conv1D(filters=32, kernel_size=ker_size, strides=1, padding='same',
  140. dilation_rate=1,
  141. kernel_initializer='normal')(Input)
  142. #Output = BatchNormalization()(Output)
  143. Output = Activation('relu')(Output)
  144. Output = Conv1D(filters=32, kernel_size=ker_size, strides=1, padding='same',
  145. dilation_rate=1,
  146. kernel_initializer='normal')(Output)
  147. Output = Add()([Output, Input])
  148. return Output
  149. def resblock(Input, ker_size, red_dim):
  150. #Input = to_channels_first(Input)
  151. Output = Conv1D(filters=red_dim, kernel_size=1, strides=1, padding='same',
  152. dilation_rate=1,
  153. kernel_initializer='normal')(Input)
  154. #Output = BatchNormalization()(Output)
  155. Output = Activation('relu')(Output)
  156. Output = Conv1D(filters=red_dim, kernel_size=ker_size, strides=1, padding='same',
  157. dilation_rate=1,
  158. kernel_initializer='normal')(Output)
  159. #Output = BatchNormalization()(Output)
  160. Output = Activation('relu')(Output)
  161. Output = Conv1D(filters=32, kernel_size=1, strides=1, padding='same',
  162. dilation_rate=1,
  163. kernel_initializer='normal')(Output)
  164. #Output = BatchNormalization()(Output)
  165. Output = Add()([Output, Input])
  166. Output = PReLU(alpha_initializer='zeros', alpha_regularizer=None)(Output)
  167. return Output
  168. def resnet(in_size=8,
  169. out_size=256,
  170. num_units=2,
  171. red_dim=8,
  172. gpu_id=0,
  173. batch_size=32,
  174. ker_size=3):
  175. gpu_list = ["gpu(%d)" % gpu_id]
  176. a = Input(shape=(in_size,))
  177. first = Dense(256, kernel_initializer='normal')(a)
  178. first = PReLU(alpha_initializer='zeros', alpha_regularizer=None)(first)
  179. first = Reshape((8,32))(first)
  180. for units in np.arange(num_units):
  181. #first = resblock(first, ker_size, red_dim)
  182. first = resblock2(first, ker_size)
  183. last = Flatten()(first)
  184. #last = Dense(256, kernel_initializer='normal')(first)
  185. model = Model(inputs=a, outputs=last)
  186. #compile model
  187. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K], context = gpu_list)
  188. return model