# scnets.py — Keras model builders (fully-connected, Conv1D, residual) for spectrum prediction.
  1. from keras import backend as K
  2. from keras.models import Sequential, Model
  3. from keras.layers import Dense, Dropout
  4. from keras.layers import Reshape, UpSampling1D, Conv1D
  5. from keras.layers import Flatten, Activation
  6. from keras.utils import np_utils, multi_gpu_model
  7. from keras.regularizers import l2
  8. from keras.wrappers.scikit_learn import KerasRegressor
  9. from keras.optimizers import Adam
  10. import numpy as np
  11. #import matplotlib.pyplot as plt
  12. from keras.layers import PReLU
  13. from keras.models import Model
  14. from keras.layers import Input, Add
  15. from keras.layers.normalization import BatchNormalization
  16. from keras.layers import PReLU
  17. from keras.utils import to_channels_first
  18. # # #staging area for new models
  19. # def plot_training_history(history, red_factor):
  20. # loss, val_loss = history.history['loss'], history.history['val_loss']
  21. # loss = np.asarray(loss)/red_factor
  22. # val_loss = np.asarray(val_loss)/red_factor
  23. # epochs = len(loss)
  24. # fig, axs = plt.subplots(1,1, figsize=(5,5))
  25. # axs.semilogy(np.arange(1, epochs + 1), loss, label='train error')
  26. # axs.semilogy(np.arange(1, epochs + 1), val_loss, label='validation error')
  27. # axs.set_xlabel('Epoch number')
  28. # axs.set_ylabel('Mean Relative Error (MRE) (%)')
  29. # axs.legend(loc="best")
  30. #function to test performance on testset
  31. def calc_mre(y_true, y_pred):
  32. y_err = 100*np.abs(y_true - y_pred)/y_true
  33. return np.mean(y_err)
  34. #function to test performance on testset
  35. def calc_mre_K(y_true, y_pred):
  36. y_err = K.abs(y_true - y_pred)
  37. return K.mean(y_err)
  38. #naive percentage loss
  39. def relerr_loss(y_true, y_pred):
  40. y_err = K.abs(y_true - y_pred)
  41. y_err_f = K.flatten(y_err)
  42. return K.sum(y_err_f)
  43. def fullycon( in_size=8,
  44. out_size=256,
  45. batch_size=32,
  46. N_hidden=3,
  47. N_neurons=256,
  48. N_gpus=1):
  49. """
  50. Returns a fully-connected model which will take a normalized size vector and return a
  51. spectrum
  52. in_size: length of the size vector
  53. out_size: length of the spectrum vector
  54. N_hidden: number of hidden layers
  55. N_neurons: number of neurons in each of the hidden layers
  56. """
  57. model = Sequential()
  58. model.add(Dense(N_neurons, input_dim=in_size,
  59. kernel_initializer='normal',
  60. name='first' ))
  61. model.add(Activation('relu'))
  62. for h in np.arange(N_hidden):
  63. lname = "H"+str(h)
  64. model.add(Dense(N_neurons,
  65. kernel_initializer='normal', name=lname ))
  66. model.add(Activation('relu'))
  67. model.add(Dense(out_size, kernel_initializer='normal', name='last'))
  68. model.add(Activation('relu'))
  69. # Compile model
  70. if N_gpus == 1:
  71. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
  72. else:
  73. gpu_list = ["gpu(%d)" % i for i in range(N_gpus)]
  74. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K], context = gpu_list)
  75. return model
  76. def conv1dmodel(in_size=8,
  77. out_size=256,
  78. batch_size=32,
  79. c1_nf=64,
  80. clayers=2,
  81. ker_size=3):
  82. # create model
  83. model = Sequential()
  84. model.add(Dense(out_size, input_dim=in_size,
  85. kernel_initializer='normal',
  86. name='first'))
  87. model.add(Activation('relu'))
  88. model.add(Reshape((4, 64), name='Reshape1'))
  89. model.add(UpSampling1D(size=2, name='Up1'))
  90. model.add(Conv1D(filters=c1_nf,
  91. kernel_size=ker_size, strides=1, padding='same',
  92. dilation_rate=1, name='Conv1',
  93. kernel_initializer='normal'))
  94. model.add(Activation('relu'))
  95. for cl in np.arange(clayers):
  96. model.add(Conv1D(filters=32,
  97. kernel_size=ker_size,
  98. strides=1,
  99. padding='same',
  100. dilation_rate=1,
  101. name='Conv'+ str(cl+2),
  102. kernel_initializer='normal'))
  103. model.add(Activation('relu'))
  104. model.add(Flatten())
  105. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
  106. return model
  107. def convprel(in_size=8,
  108. out_size=256,
  109. batch_size=32,
  110. c1_nf=64,
  111. clayers=2,
  112. ker_size=3):
  113. # create model
  114. model = Sequential()
  115. model.add(Dense(out_size, input_dim=in_size,
  116. kernel_initializer='normal',
  117. name='first'))
  118. model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None))
  119. model.add(Reshape((4, 64), name='Reshape1'))
  120. model.add(UpSampling1D(size=2, name='Up1'))
  121. model.add(Conv1D(filters=c1_nf,
  122. kernel_size=ker_size, strides=1, padding='same',
  123. dilation_rate=1, name='Conv1',
  124. kernel_initializer='normal'))
  125. model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None))
  126. for cl in np.arange(clayers):
  127. model.add(Conv1D(filters=32,
  128. kernel_size=ker_size,
  129. strides=1,
  130. padding='same',
  131. dilation_rate=1,
  132. name='Conv'+ str(cl+2),
  133. kernel_initializer='normal'))
  134. model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None))
  135. model.add(Flatten())
  136. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
  137. return model
  138. def resblock2(Input, ker_size):
  139. #Input = to_channels_first(Input)
  140. Output = Conv1D(filters=32, kernel_size=ker_size, strides=1, padding='same',
  141. dilation_rate=1,
  142. kernel_initializer='normal')(Input)
  143. #Output = BatchNormalization()(Output)
  144. Output = Activation('relu')(Output)
  145. Output = Conv1D(filters=32, kernel_size=ker_size, strides=1, padding='same',
  146. dilation_rate=1,
  147. kernel_initializer='normal')(Output)
  148. Output = Add()([Output, Input])
  149. return Output
  150. def resblock(Input, ker_size, red_dim):
  151. #Input = to_channels_first(Input)
  152. Output = Conv1D(filters=red_dim, kernel_size=1, strides=1, padding='same',
  153. dilation_rate=1,
  154. kernel_initializer='normal')(Input)
  155. #Output = BatchNormalization()(Output)
  156. Output = Activation('relu')(Output)
  157. Output = Conv1D(filters=red_dim, kernel_size=ker_size, strides=1, padding='same',
  158. dilation_rate=1,
  159. kernel_initializer='normal')(Output)
  160. #Output = BatchNormalization()(Output)
  161. Output = Activation('relu')(Output)
  162. Output = Conv1D(filters=32, kernel_size=1, strides=1, padding='same',
  163. dilation_rate=1,
  164. kernel_initializer='normal')(Output)
  165. #Output = BatchNormalization()(Output)
  166. Output = Add()([Output, Input])
  167. Output = PReLU(alpha_initializer='zeros', alpha_regularizer=None)(Output)
  168. return Output
  169. def resnet(in_size=8,
  170. out_size=256,
  171. num_units=2,
  172. red_dim=8,
  173. gpu_id=0,
  174. batch_size=32,
  175. ker_size=3):
  176. gpu_list = ["gpu(%d)" % gpu_id]
  177. a = Input(shape=(in_size,))
  178. first = Dense(256, kernel_initializer='normal')(a)
  179. first = PReLU(alpha_initializer='zeros', alpha_regularizer=None)(first)
  180. first = Reshape((8,32))(first)
  181. for units in np.arange(num_units):
  182. #first = resblock(first, ker_size, red_dim)
  183. first = resblock2(first, ker_size)
  184. last = Flatten()(first)
  185. #last = Dense(256, kernel_initializer='normal')(first)
  186. model = Model(inputs=a, outputs=last)
  187. #compile model
  188. model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K], context = gpu_list)
  189. return model