from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
# Note: keras.wrappers.scikit_learn was removed in newer Keras releases;
# scikeras.wrappers.KerasRegressor is the maintained replacement.
from keras.wrappers.scikit_learn import KerasRegressor
import scnets as scn
from sklearn.metrics import make_scorer
import numpy as np
import pandas as pd
def print_tuning_results(cvresults, modelfunc):
    # Summarize the GridSearchCV results in a DataFrame, best models first.
    pd.set_option('display.precision', 2)
    # mean_test_score holds the negated MRE (see make_scorer below), so the
    # best models have the largest, i.e. least negative, scores.
    bestidx = np.argsort(cvresults['mean_test_score'])[::-1]
    scorelist = -cvresults['mean_test_score'][bestidx]
    parlist = cvresults['params']
    # Mean fit time, reordered to match the sorted rows and shown in whole minutes.
    runtlist = (1 / 60.0) * cvresults['mean_fit_time'][bestidx]
    runtlist = runtlist.astype('int64')
    bestlist = [parlist[indx] for indx in bestidx]
    # Rebuild each model once so its parameter count can be reported.
    par_count = []
    for elem in bestlist:
        model = modelfunc(**elem)
        par_count.append(model.count_params())
    parkeylist = [key for key in bestlist[0]]
    columns = parkeylist + ['MRE(%)', 'Total Params', 'Fit Time (min)']
    df = pd.DataFrame(columns=columns)
    for colno in np.arange(len(parkeylist)):
        df[columns[colno]] = [elem[parkeylist[colno]] for elem in bestlist]
    df['MRE(%)'] = scorelist
    df['Total Params'] = par_count
    df['Fit Time (min)'] = runtlist
    return df
# Mean relative error in percent; used as a loss, so lower is better.
def mre_score_func(ground_truth, predictions):
    diff = np.abs(ground_truth - predictions) / np.abs(ground_truth)
    return 100 * np.mean(diff)
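# A minimal sanity check of the scorer (illustrative values, not from the
# original source): with ground truth [2.0, 4.0] and predictions [1.0, 5.0]
# the relative errors are 0.5 and 0.25, so the MRE is 100 * 0.375 = 37.5%.
#
#   >>> mre_score_func(np.array([2.0, 4.0]), np.array([1.0, 5.0]))
#   37.5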
def get_cv_grid(modelfunc, param_grid, num_epochs, x_train, y_train):
    # greater_is_better=False makes GridSearchCV minimize the MRE
    # (internally it negates the score, so mean_test_score stores -MRE).
    mre_score = make_scorer(mre_score_func, greater_is_better=False)
    # Wrap the Keras model builder so it behaves like a scikit-learn estimator.
    model = KerasRegressor(build_fn=modelfunc,
                           epochs=num_epochs,
                           verbose=0)
    grid = GridSearchCV(estimator=model,
                        param_grid=param_grid,
                        n_jobs=1,
                        scoring=mre_score,
                        verbose=1)
    grid_result = grid.fit(x_train, y_train)
    return grid_result.cv_results_
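

# A hedged usage sketch: `build_model`, the grid values, and the synthetic
# data below are illustrative stand-ins, not part of the original pipeline,
# which presumably builds its models via the `scnets` module imported above.
if __name__ == '__main__':

    def build_model(num_units=32, num_layers=2):
        # Simple fully connected regressor whose size is set by the grid keys.
        model = Sequential()
        model.add(Dense(num_units, activation='relu', input_dim=8))
        for _ in range(num_layers - 1):
            model.add(Dense(num_units, activation='relu'))
        model.add(Dense(1))
        model.compile(loss='mean_squared_error', optimizer='adam')
        return model

    rng = np.random.RandomState(0)
    x_train = rng.rand(200, 8)
    y_train = rng.rand(200) + 0.5   # keep targets away from zero so the MRE is well defined

    param_grid = {'num_units': [16, 32], 'num_layers': [2, 3]}
    cvresults = get_cv_grid(build_model, param_grid, num_epochs=10,
                            x_train=x_train, y_train=y_train)
    print(print_tuning_results(cvresults, build_model))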