Browse Source

further development of conv scatter nets

Ravi Hegde 6 years ago
parent
commit
1e493c3bd5

+ 320 - 0
.ipynb_checkpoints/conv_hyp-checkpoint.ipynb

@@ -0,0 +1,320 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-07T12:38:15.344220Z",
+     "start_time": "2018-09-07T12:38:15.340982Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# we will study the hyperparamter tuning of fully connected scatternet\n",
+    "\n",
+    "# first find optimal number of layers and neuron numbers\n",
+    "# second optimize the batch size and number of epochs for the best learned architecture\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-08T12:01:25.348660Z",
+     "start_time": "2018-09-08T12:01:25.309257Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "The autoreload extension is already loaded. To reload it, use:\n",
+      "  %reload_ext autoreload\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/ipyparallel/client/client.py:459: RuntimeWarning: \n",
+      "            Controller appears to be listening on localhost, but not on this machine.\n",
+      "            If this is true, you should specify Client(...,sshserver='you@Gamma')\n",
+      "            or instruct your controller to listen on an external IP.\n",
+      "  RuntimeWarning)\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[0, 1, 2, 3]"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%load_ext autoreload\n",
+    "\n",
+    "import ipyparallel as ipp\n",
+    "rc = ipp.Client()\n",
+    "rc.ids"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Loading the dataset here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-08T12:09:06.191974Z",
+     "start_time": "2018-09-08T12:09:05.681850Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[stdout:0] \n",
+      "Dataset has been loaded\n",
+      "x-train (44999, 8)\n",
+      "x-test  (55001, 8)\n",
+      "y-train (44999, 256)\n",
+      "y-test  (55001, 256)\n",
+      "[stdout:1] \n",
+      "Dataset has been loaded\n",
+      "x-train (44999, 8)\n",
+      "x-test  (55001, 8)\n",
+      "y-train (44999, 256)\n",
+      "y-test  (55001, 256)\n",
+      "[stdout:2] \n",
+      "Dataset has been loaded\n",
+      "x-train (44999, 8)\n",
+      "x-test  (55001, 8)\n",
+      "y-train (44999, 256)\n",
+      "y-test  (55001, 256)\n",
+      "[stdout:3] \n",
+      "Dataset has been loaded\n",
+      "x-train (44999, 8)\n",
+      "x-test  (55001, 8)\n",
+      "y-train (44999, 256)\n",
+      "y-test  (55001, 256)\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "[stderr:0] \n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "[stderr:1] \n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "[stderr:2] \n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "[stderr:3] \n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%px\n",
+    "\n",
+    "import numpy as np\n",
+    "\n",
+    "import h5py\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
+    "#now load this dataset \n",
+    "h5f = h5py.File('./datasets/s8_sio2tio2_v2.h5','r')\n",
+    "X = h5f['sizes'][:]\n",
+    "Y = h5f['spectrum'][:]\n",
+    "\n",
+    "#get the ranges of the loaded data\n",
+    "num_layers = X.shape[1]\n",
+    "num_lpoints = Y.shape[1]\n",
+    "size_max = np.amax(X)\n",
+    "size_min = np.amin(X)\n",
+    "size_av = 0.5*(size_max + size_min)\n",
+    "\n",
+    "#this information is not given in the dataset\n",
+    "lam_min = 300\n",
+    "lam_max = 1200\n",
+    "lams = np.linspace(lam_min, lam_max, num_lpoints)\n",
+    "\n",
+    "#create a train - test split of the dataset\n",
+    "x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.55, random_state=42)\n",
+    "\n",
+    "# normalize inputs \n",
+    "x_train = (x_train - 50)/20 \n",
+    "x_test = (x_test - 50)/20 \n",
+    "\n",
+    "print(\"Dataset has been loaded\")\n",
+    "print(\"x-train\", x_train.shape)\n",
+    "print(\"x-test \", x_test.shape)\n",
+    "print(\"y-train\", y_train.shape)\n",
+    "print(\"y-test \", y_test.shape)\n",
+    "\n",
+    "import warnings\n",
+    "warnings.filterwarnings('ignore')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### create models here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-08T12:09:43.998716Z",
+     "start_time": "2018-09-08T12:09:43.987026Z"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<AsyncResult: execute>"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%px --targets 0 --noblock\n",
+    "\n",
+    "%autoreload 2\n",
+    "\n",
+    "\n",
+    "\n",
+    "import hyptune as htun\n",
+    "import scnets as scn\n",
+    "\n",
+    "import os\n",
+    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+    "\n",
+    "# #resnet\n",
+    "func = scn.resnet\n",
+    "param_grid = dict(num_units=[2,3,4,5,6], \n",
+    "                  red_dim=[8,16],\n",
+    "                  batch_size=[32],\n",
+    "                  ker_size=[3, 5, 7])\n",
+    "    \n",
+    "cvresults = htun.get_cv_grid(modelfunc=func, \n",
+    "                             param_grid=param_grid,\n",
+    "                             num_epochs=250,\n",
+    "                             x_train=x_train,\n",
+    "                             y_train=y_train)\n",
+    "\n",
+    "htres = htun.print_tuning_results(cvresults, func)\n",
+    "print(htres.to_string(index=False))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-08T11:24:51.887905Z",
+     "start_time": "2018-09-08T11:24:51.878241Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "#%%px --targets 1 --noblock\n",
+    "\n",
+    "%autoreload 2\n",
+    "\n",
+    "\n",
+    "import hyptune as htun\n",
+    "import scnets as scn\n",
+    "\n",
+    "import os\n",
+    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+    "\n",
+    "# #resnet\n",
+    "func = scn.resnet\n",
+    "param_grid = dict(num_units=[2,3,4,5,6], \n",
+    "                  red_dim=[8,16],\n",
+    "                  batch_size=[64],\n",
+    "                  ker_size=[3, 5, 7])\n",
+    "    \n",
+    "cvresults = htun.get_cv_grid(modelfunc=func, \n",
+    "                             param_grid=param_grid,\n",
+    "                             num_epochs=250,\n",
+    "                             x_train=x_train,\n",
+    "                             y_train=y_train)\n",
+    "\n",
+    "htres = htun.print_tuning_results(cvresults, func)\n",
+    "print(htres.to_string(index=False))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
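
The %%px cells in these notebooks dispatch work to ipyparallel engines started by an external ipcluster. A minimal sketch of the same pattern through the ipyparallel API (it assumes a cluster is already running; the toy job and variable names are illustrative, not from this commit):

    import ipyparallel as ipp

    rc = ipp.Client()          # connect to the running controller
    print(rc.ids)              # engine ids, e.g. [0, 1, 2, 3]

    dview = rc[:]              # direct view over every engine
    dview.execute("import numpy as np", block=True)   # shared setup, like a plain %%px cell

    # run a long job on engine 0 only, without blocking -- like `%%px --targets 0 --noblock`
    ar = rc[0].execute("result = sum(range(10**7))", block=False)
    ar.wait()                  # block until the engine finishes
    print(rc[0]["result"])     # pull the variable back from engine 0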

+ 598 - 0
.ipynb_checkpoints/lkyrel_hyp-checkpoint.ipynb

@@ -0,0 +1,598 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# we will study the hyperparamter tuning of fully connected scatternet\n",
+    "\n",
+    "# first find optimal number of layers and neuron numbers\n",
+    "# second optimize the batch size and number of epochs for the best learned architecture\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Loading the dataset here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T17:46:08.075289Z",
+     "start_time": "2018-09-04T17:46:07.653719Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Dataset has been loaded\n",
+      "x-train (44999, 8)\n",
+      "x-test  (55001, 8)\n",
+      "y-train (44999, 256)\n",
+      "y-test  (55001, 256)\n"
+     ]
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "import h5py\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
+    "#now load this dataset \n",
+    "h5f = h5py.File('./datasets/s8_sio2tio2_v2.h5','r')\n",
+    "X = h5f['sizes'][:]\n",
+    "Y = h5f['spectrum'][:]\n",
+    "\n",
+    "#get the ranges of the loaded data\n",
+    "num_layers = X.shape[1]\n",
+    "num_lpoints = Y.shape[1]\n",
+    "size_max = np.amax(X)\n",
+    "size_min = np.amin(X)\n",
+    "size_av = 0.5*(size_max + size_min)\n",
+    "\n",
+    "#this information is not given in the dataset\n",
+    "lam_min = 300\n",
+    "lam_max = 1200\n",
+    "lams = np.linspace(lam_min, lam_max, num_lpoints)\n",
+    "\n",
+    "#create a train - test split of the dataset\n",
+    "x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.55, random_state=42)\n",
+    "\n",
+    "# normalize inputs \n",
+    "x_train = (x_train - 50)/20 \n",
+    "x_test = (x_test - 50)/20 \n",
+    "\n",
+    "print(\"Dataset has been loaded\")\n",
+    "print(\"x-train\", x_train.shape)\n",
+    "print(\"x-test \", x_test.shape)\n",
+    "print(\"y-train\", y_train.shape)\n",
+    "print(\"y-test \", y_test.shape)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### create models here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T08:41:45.424541Z",
+     "start_time": "2018-09-04T08:41:44.270792Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import scnets as scn\n",
+    "from IPython.display import SVG\n",
+    "from keras.utils.vis_utils import model_to_dot\n",
+    "\n",
+    "#define and visualize the model here\n",
+    "#model = scn.fullycon(num_layers, num_lpoints, 4, 500, 2)\n",
+    "\n",
+    "model = scn.convprel(in_size=8, \n",
+    "        out_size=256,\n",
+    "        c1_nf=64,\n",
+    "        clayers=3,\n",
+    "        ker_size=3)\n",
+    "model.summary()\n",
+    "#SVG(model_to_dot(model).create(prog='dot', format='svg'))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T08:53:28.967090Z",
+     "start_time": "2018-09-04T08:42:58.990636Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, test_size=0.2, random_state=42)\n",
+    "history = model.fit(x_t, y_t,\n",
+    "                    batch_size=64,\n",
+    "                    epochs=250, \n",
+    "                    verbose=1,\n",
+    "                    validation_data=(x_v, y_v))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T09:13:20.325358Z",
+     "start_time": "2018-09-04T09:13:20.044281Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "scn.plot_training_history(history, 64*2.56)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "start_time": "2018-09-04T17:16:38.203Z"
+    },
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Fitting 3 folds for each of 12 candidates, totalling 36 fits\n"
+     ]
+    }
+   ],
+   "source": [
+    "\n",
+    "\n",
+    "\n",
+    "import warnings\n",
+    "warnings.filterwarnings('ignore')\n",
+    "\n",
+    "from sklearn.metrics import fbeta_score, make_scorer\n",
+    "\n",
+    "def my_custom_score_func(ground_truth, predictions):\n",
+    "    diff = np.abs(ground_truth - predictions)/np.abs(ground_truth)\n",
+    "    return np.mean(diff)\n",
+    "\n",
+    "\n",
+    "\n",
+    "from sklearn.model_selection import GridSearchCV\n",
+    "from keras.models import Sequential\n",
+    "from keras.layers import Dense\n",
+    "from keras.wrappers.scikit_learn import KerasRegressor\n",
+    "import scnets as scn\n",
+    "#model = KerasClassifier(build_fn=scn.fullycon, in_size=8, out_size=250, N_gpus=1, epochs=500, verbose=0)\n",
+    "\n",
+    "model = KerasRegressor(build_fn=scn.conv1dmodel, \n",
+    "                        in_size=8, \n",
+    "                        out_size=256, \n",
+    "                        c1_nf=64,\n",
+    "                        clayers=3,\n",
+    "                        ker_size=3,\n",
+    "                        epochs=250, \n",
+    "                        verbose=0)\n",
+    "my_score = make_scorer(my_custom_score_func, greater_is_better=False)\n",
+    "\n",
+    "\n",
+    "\n",
+    "param_grid = dict(ker_size=[3, 5],\n",
+    "                  clayers=[3,4,5],\n",
+    "                  batch_size=[32,64])                                  \n",
+    "grid = GridSearchCV(estimator=model, \n",
+    "                    param_grid=param_grid, \n",
+    "                    n_jobs=1, \n",
+    "                    scoring='explained_variance',\n",
+    "                    verbose=1)\n",
+    "grid_result = grid.fit(x_train, y_train)\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T14:25:50.558503Z",
+     "start_time": "2018-09-04T14:25:50.539227Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "grid_result.cv_results_"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T14:26:16.796674Z",
+     "start_time": "2018-09-04T14:26:16.790511Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "grid_result.best_params_"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T15:01:18.552044Z",
+     "start_time": "2018-09-04T15:01:18.538960Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "bestidx = np.argsort(grid_result.cv_results_['mean_test_score'])\n",
+    "print(idx)\n",
+    "print(np.flip(idx))\n",
+    "parlist = grid_result.cv_results_['params']\n",
+    "bestlist = [parlist[indx] for indx in bestidx]\n",
+    "bestlist\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "'mean_fit_time': array([ 440.59602499,  481.73871398,  477.10135643,  504.1547633 ,\n",
+    "         488.27373465,  564.76856009,  571.80046884,  569.00620119,\n",
+    "         650.78964178,  740.00103672,  597.98015889,  659.03726633,\n",
+    "         476.14166268,  508.75317804,  505.10563159,  556.87875859,\n",
+    "         556.58007542,  542.48226706,  591.323385  ,  616.57070398,\n",
+    "         658.64879243,  891.65342418, 1055.23733377,  993.75831501,\n",
+    "         647.80959813,  629.8490928 ,  516.74331371,  787.65950712,\n",
+    "         635.2311732 ,  545.72071338,  904.7078863 ,  685.55618747,\n",
+    "         530.5135781 ,  815.14902997,  821.7310555 ,  685.34639025,\n",
+    "         240.90511258,  246.83409182,  259.09067996,  262.85167106,\n",
+    "         228.2099692 ,  392.25437156,  374.78024157,  452.21288816,\n",
+    "         459.90044618,  495.74129097,  501.12204981,  368.91637723,\n",
+    "         240.11106253,  237.93171946,  261.74841015,  265.67413298,\n",
+    "         274.72749496,  378.51081745,  436.21683431,  241.44164371,\n",
+    "         274.60629002,  330.47249166,  394.98302094,  318.71901139,\n",
+    "         237.12692181,  248.02535923,  254.61501988,  243.65626915,\n",
+    "         298.34629599,  273.75161084,  318.28928526,  312.33878072,\n",
+    "         309.61013468,  335.33733678,  316.16733519,  284.3205506 ]),\n",
+    "\n",
+    " 'params': [{'batch_size': 32, 'c1_nf': 32, 'clayers': 1, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 1, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 1, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 2, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 2, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 2, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 3, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 3, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 3, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 4, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 4, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 4, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 1, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 1, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 1, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 2, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 2, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 2, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 3, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 3, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 3, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 4, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 4, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 4, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 1, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 1, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 1, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 2, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 2, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 2, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 3, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 3, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 3, 'ker_size': 7},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 4, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 4, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 4, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 1, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 1, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 1, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 2, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 2, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 2, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 3, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 3, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 3, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 4, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 4, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 4, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 1, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 1, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 1, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 2, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 2, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 2, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 3, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 3, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 3, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 4, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 4, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 4, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 1, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 1, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 1, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 2, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 2, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 2, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 3, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 3, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 3, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 4, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 4, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 4, 'ker_size': 7}],\n",
+    "\n",
+    "\n",
+    " 'mean_test_score': array([0.00235561, 0.00175559, 0.00191115, 0.00255561, 0.00228894,\n",
+    "        0.00233339, 0.00240005, 0.00260006, 0.00262228, 0.00222227,\n",
+    "        0.00222227, 0.00235561, 0.00191115, 0.0021556 , 0.00242228,\n",
+    "        0.00217783, 0.00217783, 0.00233339, 0.00231116, 0.00202227,\n",
+    "        0.00206671, 0.00251117, 0.00237783, 0.00253339, 0.00197782,\n",
+    "        0.00222227, 0.00220005, 0.0019556 , 0.00208894, 0.00240005,\n",
+    "        0.00220005, 0.00204449, 0.00222227, 0.00228894, 0.00231116,\n",
+    "        0.00246672, 0.00200004, 0.00182226, 0.00164448, 0.00197782,\n",
+    "        0.0026445 , 0.00208894, 0.00222227, 0.00248894, 0.00217783,\n",
+    "        0.00208894, 0.00253339, 0.0024445 , 0.00204449, 0.00188893,\n",
+    "        0.00197782, 0.00233339, 0.00231116, 0.00231116, 0.00233339,\n",
+    "        0.0021556 , 0.00231116, 0.0021556 , 0.00220005, 0.00233339,\n",
+    "        0.0021556 , 0.00220005, 0.00251117, 0.00191115, 0.00197782,\n",
+    "        0.0021556 , 0.00224449, 0.00231116, 0.00228894, 0.00220005,\n",
+    "        0.00235561, 0.00211116]),\n",
+    " 'std_test_score': array([5.45313915e-04, 1.36982222e-04, 5.71830319e-04, 3.09583745e-04,\n",
+    "        4.63034070e-04, 1.88601629e-04, 3.57010463e-04, 2.49388000e-04,\n",
+    "        2.06134688e-04, 4.75566024e-04, 1.74937538e-04, 4.01290372e-04,\n",
+    "        2.06146739e-04, 5.14564491e-04, 3.28118192e-04, 3.09506255e-04,\n",
+    "        3.45776617e-04, 5.65777616e-04, 6.28186970e-05, 2.45451156e-04,\n",
+    "        3.03064138e-04, 4.69393085e-04, 3.70577110e-04, 2.37355340e-04,\n",
+    "        5.14534288e-04, 2.99865023e-04, 1.08929121e-04, 3.28165699e-04,\n",
+    "        1.91227745e-04, 1.44095217e-04, 1.96333991e-04, 1.13377551e-04,\n",
+    "        6.28899342e-05, 4.93994775e-04, 5.05855189e-04, 4.25227642e-04,\n",
+    "        5.25013167e-04, 1.66308696e-04, 1.91169192e-04, 7.89511257e-04,\n",
+    "        3.09609747e-04, 2.20000709e-04, 1.36980546e-04, 2.99872491e-04,\n",
+    "        2.06155046e-04, 3.70481852e-04, 1.88604774e-04, 2.74052629e-04,\n",
+    "        4.15813294e-04, 2.99823569e-04, 2.06148816e-04, 1.44067451e-04,\n",
+    "        3.50039135e-04, 2.79388549e-04, 3.81084580e-04, 3.62491814e-04,\n",
+    "        2.26678399e-04, 4.08631950e-04, 4.35536821e-04, 2.49505205e-04,\n",
+    "        4.53303035e-04, 1.44064707e-04, 5.77012935e-04, 8.31603039e-05,\n",
+    "        5.34338425e-04, 2.79402571e-04, 2.45459743e-04, 1.36996891e-04,\n",
+    "        3.86260180e-04, 1.88498951e-04, 3.09579276e-04, 3.70571422e-04]),\n",
+    " 'rank_test_score': array([16, 71, 66,  4, 30, 19, 13,  3,  2, 34, 34, 16, 66, 47, 12, 44, 44,\n",
+    "        19, 24, 59, 56,  7, 15,  5, 61, 34, 39, 65, 53, 13, 39, 57, 34, 30,\n",
+    "        24, 10, 60, 70, 72, 61,  1, 53, 34,  9, 44, 53,  5, 11, 57, 69, 61,\n",
+    "        19, 24, 24, 19, 47, 24, 47, 39, 19, 47, 39,  7, 66, 61, 47, 33, 24,\n",
+    "        30, 39, 16, 52], dtype=int32),\n",
+    "\n",
+    "\n",
+    " 'mean_train_score': array([0.00231116, 0.00187782, 0.0018556 , 0.00231116, 0.00222227,\n",
+    "        0.00217783, 0.00228894, 0.00236672, 0.00232227, 0.00252228,\n",
+    "        0.00213338, 0.00241117, 0.00214449, 0.00204449, 0.00235561,\n",
+    "        0.00205561, 0.00217783, 0.00238894, 0.00213338, 0.00230005,\n",
+    "        0.0021556 , 0.0023445 , 0.00238894, 0.00236672, 0.00203338,\n",
+    "        0.00208894, 0.00232228, 0.00203338, 0.00258895, 0.00216671,\n",
+    "        0.00221117, 0.00217783, 0.0022556 , 0.00224449, 0.00246672,\n",
+    "        0.00240006, 0.00187782, 0.00183338, 0.00153337, 0.00223338,\n",
+    "        0.00186671, 0.00204449, 0.00218894, 0.00227783, 0.00203338,\n",
+    "        0.00257784, 0.00235561, 0.0023445 , 0.00200004, 0.00210005,\n",
+    "        0.00204449, 0.00247784, 0.00233338, 0.00230005, 0.00223338,\n",
+    "        0.00222227, 0.00228894, 0.0023445 , 0.00228894, 0.00248895,\n",
+    "        0.00188893, 0.00211116, 0.00233339, 0.00236672, 0.00213338,\n",
+    "        0.00204449, 0.00248895, 0.00238895, 0.00233338, 0.00231116,\n",
+    "        0.0023445 , 0.00241116]),\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "{'mean_fit_time': array([ 893.33654523,  898.53078222, 1119.14394736, 1130.1775128 ,\n",
+    "         956.51246222,  964.45365715, 1209.3984166 , 1582.91039157,\n",
+    "        1394.26560704, 1616.26630108, 1266.47200227, 1116.83488099,\n",
+    "        1205.42738708, 1201.92515103, 1210.92550143]),\n",
+    " 'std_fit_time': array([  3.08891285,   6.81113186, 212.20371026, 238.93357922,\n",
+    "          6.97112622,  18.87349827, 223.00445851,  57.7875855 ,\n",
+    "        100.70476936,  43.95356933, 134.83849082,  11.29690679,\n",
+    "          5.42330543,   6.19267952,   4.92641743]),\n",
+    " 'params': [{'N_hidden': 1, 'N_neurons': 250},\n",
+    "  {'N_hidden': 1, 'N_neurons': 500},\n",
+    "  {'N_hidden': 1, 'N_neurons': 1000},\n",
+    "  {'N_hidden': 2, 'N_neurons': 250},\n",
+    "  {'N_hidden': 2, 'N_neurons': 500},\n",
+    "  {'N_hidden': 2, 'N_neurons': 1000},\n",
+    "  {'N_hidden': 3, 'N_neurons': 250},\n",
+    "  {'N_hidden': 3, 'N_neurons': 500},\n",
+    "  {'N_hidden': 3, 'N_neurons': 1000},\n",
+    "  {'N_hidden': 4, 'N_neurons': 250},\n",
+    "  {'N_hidden': 4, 'N_neurons': 500},\n",
+    "  {'N_hidden': 4, 'N_neurons': 1000},\n",
+    "  {'N_hidden': 5, 'N_neurons': 250},\n",
+    "  {'N_hidden': 5, 'N_neurons': 500},\n",
+    "  {'N_hidden': 5, 'N_neurons': 1000}],\n",
+    " 'split0_test_score': array([0.00235, 0.00225, 0.0025 , 0.0024 , 0.0026 , 0.0021 , 0.00165,\n",
+    "        0.0023 , 0.00255, 0.00255, 0.0024 , 0.0027 , 0.0021 , 0.00225,\n",
+    "        0.0022 ]),\n",
+    " 'split1_test_score': array([0.0024 , 0.00225, 0.0022 , 0.0022 , 0.00235, 0.0022 , 0.0021 ,\n",
+    "        0.00195, 0.00215, 0.0022 , 0.00185, 0.00195, 0.002  , 0.00195,\n",
+    "        0.0021 ]),\n",
+    " 'split2_test_score': array([0.00255, 0.00315, 0.00275, 0.00295, 0.00355, 0.0032 , 0.00275,\n",
+    "        0.0031 , 0.0028 , 0.00305, 0.00305, 0.00355, 0.00275, 0.00285,\n",
+    "        0.00305]),\n",
+    " 'mean_test_score': array([0.00243333, 0.00255   , 0.00248333, 0.00251667, 0.00283333,\n",
+    "        0.0025    , 0.00216667, 0.00245   , 0.0025    , 0.0026    ,\n",
+    "        0.00243333, 0.00273333, 0.00228333, 0.00235   , 0.00245   ]),\n",
+    " 'std_test_score': array([8.49836586e-05, 4.24264069e-04, 2.24845626e-04, 3.17104960e-04,\n",
+    "        5.16935414e-04, 4.96655481e-04, 4.51540573e-04, 4.81317636e-04,\n",
+    "        2.67706307e-04, 3.48807492e-04, 4.90464632e-04, 6.53622385e-04,\n",
+    "        3.32498956e-04, 3.74165739e-04, 4.26223728e-04]),\n",
+    " 'rank_test_score': array([11,  4,  8,  5,  1,  6, 15,  9,  6,  3, 11,  2, 14, 13,  9],\n",
+    "       dtype=int32),\n",
+    " 'split0_train_score': array([0.00255 , 0.0024  , 0.0023  , 0.00255 , 0.00255 , 0.002375,\n",
+    "        0.002125, 0.002625, 0.0025  , 0.002775, 0.002625, 0.0027  ,\n",
+    "        0.0023  , 0.00245 , 0.002275]),\n",
+    " 'split1_train_score': array([0.002975, 0.0025  , 0.00255 , 0.0028  , 0.0029  , 0.002475,\n",
+    "        0.002575, 0.0027  , 0.002575, 0.002375, 0.002475, 0.002475,\n",
+    "        0.002325, 0.002425, 0.002375]),\n",
+    " 'split2_train_score': array([0.00215 , 0.0022  , 0.00215 , 0.002375, 0.00205 , 0.002175,\n",
+    "        0.00215 , 0.0022  , 0.002   , 0.00235 , 0.00235 , 0.002525,\n",
+    "        0.00215 , 0.002175, 0.002175]),\n",
+    " 'mean_train_score': array([0.00255833, 0.00236667, 0.00233333, 0.002575  , 0.0025    ,\n",
+    "        0.00234167, 0.00228333, 0.00250833, 0.00235833, 0.0025    ,\n",
+    "        0.00248333, 0.00256667, 0.00225833, 0.00235   , 0.002275  ]),\n",
+    " 'std_train_score': array([3.36856382e-04, 1.24721913e-04, 1.64991582e-04, 1.74403746e-04,\n",
+    "        3.48807492e-04, 1.24721913e-04, 2.06491862e-04, 2.20164080e-04,\n",
+    "        2.55223214e-04, 1.94722024e-04, 1.12422813e-04, 9.64653075e-05,\n",
+    "        7.72801541e-05, 1.24163870e-04, 8.16496581e-05])}\n",
+    "        \n",
+    " \n",
+    " {'mean_fit_time': array([  685.01906315,  1809.28454868,   336.60541034,   878.23016135]),\n",
+    " 'mean_score_time': array([ 1.38006322,  1.27389534,  0.6934317 ,  0.69225407]),\n",
+    " 'mean_test_score': array([ 0.00241667,  0.00251667,  0.00243333,  0.00261667]),\n",
+    " 'mean_train_score': array([ 0.00245833,  0.00236667,  0.00248333,  0.00253333]),\n",
+    " 'param_batch_size': masked_array(data = [32 32 64 64],\n",
+    "              mask = [False False False False],\n",
+    "        fill_value = ?),\n",
+    " 'param_epochs': masked_array(data = [200 500 200 500],\n",
+    "              mask = [False False False False],\n",
+    "        fill_value = ?),\n",
+    " 'params': ({'batch_size': 32, 'epochs': 200},\n",
+    "  {'batch_size': 32, 'epochs': 500},\n",
+    "  {'batch_size': 64, 'epochs': 200},\n",
+    "  {'batch_size': 64, 'epochs': 500}),\n",
+    " 'rank_test_score': array([4, 2, 3, 1], dtype=int32),\n",
+    " 'split0_test_score': array([ 0.0021 ,  0.00225,  0.00215,  0.00225]),\n",
+    " 'split0_train_score': array([ 0.00235 ,  0.0023  ,  0.002625,  0.002575]),\n",
+    " 'split1_test_score': array([ 0.00225,  0.00225,  0.00215,  0.00235]),\n",
+    " 'split1_train_score': array([ 0.002675,  0.002725,  0.002675,  0.002825]),\n",
+    " 'split2_test_score': array([ 0.0029 ,  0.00305,  0.003  ,  0.00325]),\n",
+    " 'split2_train_score': array([ 0.00235 ,  0.002075,  0.00215 ,  0.0022  ]),\n",
+    " 'std_fit_time': array([  27.85582158,  121.41697465,    1.58335506,   11.64839192]),\n",
+    " 'std_score_time': array([ 0.01602076,  0.06291871,  0.03384719,  0.05541393]),\n",
+    " 'std_test_score': array([ 0.00034721,  0.00037712,  0.00040069,  0.00044969]),\n",
+    " 'std_train_score': array([ 0.00015321,  0.00026952,  0.00023658,  0.00025685])}\n",
+    "        \n",
+    "        \n",
+    "        'mean_fit_time': array([1236.77363722, 1263.8373781 , 1283.07971772,  617.23694984,\n",
+    "         644.64875857,  630.75466394]),\n",
+    " 'std_fit_time': array([15.23634435,  1.04774932, 70.7173362 , 19.87266061,  3.13235316,\n",
+    "        19.13357172]),\n",
+    " 'mean_score_time': array([1.9509182 , 2.09144211, 2.07234033, 1.05850196, 1.09700545,\n",
+    "        1.07024908]),\n",
+    " 'std_score_time': array([0.09494565, 0.09207867, 0.09335411, 0.04954752, 0.04209056,\n",
+    "        0.05320864]),\n",
+    " 'param_batch_size': masked_array(data=[32, 32, 32, 64, 64, 64],\n",
+    "              mask=[False, False, False, False, False, False],\n",
+    "        fill_value='?',\n",
+    "             dtype=object),\n",
+    " 'param_ker_size': masked_array(data=[3, 5, 7, 3, 5, 7],\n",
+    "              mask=[False, False, False, False, False, False],\n",
+    "        fill_value='?',\n",
+    "             dtype=object),\n",
+    " 'params': [{'batch_size': 32, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'ker_size': 7}],\n",
+    " 'split0_test_score': array([0.00225, 0.0017 , 0.0027 , 0.00225, 0.00205, 0.0024 ]),\n",
+    " 'split1_test_score': array([0.0021 , 0.0023 , 0.0023 , 0.0019 , 0.002  , 0.00165]),\n",
+    " 'split2_test_score': array([0.00325, 0.00215, 0.0021 , 0.00245, 0.0024 , 0.0024 ]),\n",
+    " 'mean_test_score': array([0.00253333, 0.00205   , 0.00236667, 0.0022    , 0.00215   ,\n",
+    "        0.00215   ]),\n",
+    " 'std_test_score': array([0.00051045, 0.00025495, 0.00024944, 0.0002273 , 0.00017795,\n",
+    "        0.00035355]),\n",
+    " 'rank_test_score': array([1, 6, 2, 3, 4, 4], dtype=int32),\n",
+    " 'split0_train_score': array([0.001875, 0.001625, 0.002525, 0.002175, 0.0019  , 0.002275]),\n",
+    " 'split1_train_score': array([0.0027  , 0.00275 , 0.00245 , 0.0022  , 0.002125, 0.002475]),\n",
+    " 'split2_train_score': array([0.0024  , 0.00205 , 0.0019  , 0.0021  , 0.00225 , 0.001775]),\n",
+    " 'mean_train_score': array([0.002325  , 0.00214167, 0.00229167, 0.00215833, 0.00209167,\n",
+    "        0.002175  ]),\n",
+    " 'std_train_score': array([3.40954542e-04, 4.63830668e-04, 2.78637558e-04, 4.24918293e-05,\n",
+    "        1.44817893e-04, 2.94392029e-04])}\n",
+    "        "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
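
The tuning cells above wrap a Keras model builder in KerasRegressor and hand it to GridSearchCV, scoring with the notebook's mean-relative-error function. A self-contained sketch of that pattern; the toy build_model stands in for the repo's scn.conv1dmodel, and the grid values are illustrative assumptions:

    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.wrappers.scikit_learn import KerasRegressor
    from sklearn.model_selection import GridSearchCV
    from sklearn.metrics import make_scorer

    def build_model(n_hidden=1, n_neurons=64, in_size=8, out_size=256):
        # toy stand-in for the repo's model builders
        model = Sequential()
        model.add(Dense(n_neurons, activation='relu', input_dim=in_size))
        for _ in range(n_hidden - 1):
            model.add(Dense(n_neurons, activation='relu'))
        model.add(Dense(out_size))
        model.compile(optimizer='adam', loss='mse')
        return model

    def rel_err(y_true, y_pred):
        # mean relative error, as in my_custom_score_func above
        return np.mean(np.abs(y_true - y_pred) / np.abs(y_true))

    est = KerasRegressor(build_fn=build_model, epochs=10, verbose=0)
    grid = GridSearchCV(est,
                        param_grid=dict(n_hidden=[1, 2],
                                        n_neurons=[32, 64],
                                        batch_size=[32, 64]),
                        scoring=make_scorer(rel_err, greater_is_better=False),
                        cv=3, verbose=1)
    # grid_result = grid.fit(x_train, y_train)   # then inspect grid_result.cv_results_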

+ 331 - 0
Untitled.ipynb

@@ -0,0 +1,331 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-11T05:53:10.003222Z",
+     "start_time": "2018-09-11T05:53:09.983926Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/ipyparallel/client/client.py:459: RuntimeWarning: \n",
+      "            Controller appears to be listening on localhost, but not on this machine.\n",
+      "            If this is true, you should specify Client(...,sshserver='you@Gamma')\n",
+      "            or instruct your controller to listen on an external IP.\n",
+      "  RuntimeWarning)\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[0, 1, 2, 3, 4, 5, 6, 7]"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import ipyparallel as ipp\n",
+    "rc = ipp.Client()\n",
+    "rc.ids"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-11T05:59:18.853907Z",
+     "start_time": "2018-09-11T05:59:18.560633Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[stdout:0] Dataset has been loaded\n",
+      "[stdout:1] Dataset has been loaded\n",
+      "[stdout:2] Dataset has been loaded\n",
+      "[stdout:3] Dataset has been loaded\n",
+      "[stdout:4] Dataset has been loaded\n",
+      "[stdout:5] Dataset has been loaded\n",
+      "[stdout:6] Dataset has been loaded\n",
+      "[stdout:7] Dataset has been loaded\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%px\n",
+    "\n",
+    "import numpy as np\n",
+    "\n",
+    "import h5py\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
+    "#now load this dataset \n",
+    "h5f = h5py.File('./datasets/s8_sio2tio2_v2.h5','r')\n",
+    "X = h5f['sizes'][:]\n",
+    "Y = h5f['spectrum'][:]\n",
+    "\n",
+    "#get the ranges of the loaded data\n",
+    "num_layers = X.shape[1]\n",
+    "num_lpoints = Y.shape[1]\n",
+    "size_max = np.amax(X)\n",
+    "size_min = np.amin(X)\n",
+    "size_av = 0.5*(size_max + size_min)\n",
+    "\n",
+    "#this information is not given in the dataset\n",
+    "lam_min = 300\n",
+    "lam_max = 1200\n",
+    "lams = np.linspace(lam_min, lam_max, num_lpoints)\n",
+    "\n",
+    "#create a train - test split of the dataset\n",
+    "x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.55, random_state=42)\n",
+    "\n",
+    "# normalize inputs \n",
+    "x_train = (x_train - 50)/20 \n",
+    "x_test = (x_test - 50)/20 \n",
+    "\n",
+    "print(\"Dataset has been loaded\")\n",
+    "# print(\"x-train\", x_train.shape)\n",
+    "# print(\"x-test \", x_test.shape)\n",
+    "# print(\"y-train\", y_train.shape)\n",
+    "# print(\"y-test \", y_test.shape)\n",
+    "\n",
+    "import warnings\n",
+    "warnings.filterwarnings('ignore')\n",
+    "\n",
+    "import hyptune as htun\n",
+    "import scnets as scn"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-11T06:09:41.335279Z",
+     "start_time": "2018-09-11T06:09:41.326581Z"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<AsyncResult: execute>"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%px --target 0 --noblock\n",
+    "import os\n",
+    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+    "func = scn.resnet\n",
+    "param_grid = dict(num_units=[2], \n",
+    "                  red_dim=[8,16],\n",
+    "                  batch_size=[32, 64],\n",
+    "                  ker_size=[3, 5, 7])\n",
+    "    \n",
+    "cvresults = htun.get_cv_grid(modelfunc=func, \n",
+    "                             param_grid=param_grid,\n",
+    "                             num_epochs=250,\n",
+    "                             x_train=x_train,\n",
+    "                             y_train=y_train)\n",
+    "\n",
+    "htres = htun.print_tuning_results(cvresults, func)\n",
+    "htres.to_csv('n2_cvsrch.dat')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-11T06:11:05.592120Z",
+     "start_time": "2018-09-11T06:11:05.583939Z"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<AsyncResult: execute>"
+      ]
+     },
+     "execution_count": 12,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%px --target 1 --noblock\n",
+    "import os\n",
+    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n",
+    "func = scn.resnet\n",
+    "param_grid = dict(num_units=[3], \n",
+    "                  red_dim=[8,16],\n",
+    "                  batch_size=[32, 64],\n",
+    "                  ker_size=[3, 5, 7])\n",
+    "    \n",
+    "cvresults = htun.get_cv_grid(modelfunc=func, \n",
+    "                             param_grid=param_grid,\n",
+    "                             num_epochs=250,\n",
+    "                             x_train=x_train,\n",
+    "                             y_train=y_train)\n",
+    "\n",
+    "htres = htun.print_tuning_results(cvresults, func)\n",
+    "htres.to_csv('n3_cvsrch.dat')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-11T06:10:10.156751Z",
+     "start_time": "2018-09-11T06:10:10.147263Z"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<AsyncResult: execute>"
+      ]
+     },
+     "execution_count": 10,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%px --target 2 --noblock\n",
+    "import os\n",
+    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+    "func = scn.resnet\n",
+    "param_grid = dict(num_units=[4], \n",
+    "                  red_dim=[8,16],\n",
+    "                  batch_size=[32, 64],\n",
+    "                  ker_size=[3, 5, 7])\n",
+    "    \n",
+    "cvresults = htun.get_cv_grid(modelfunc=func, \n",
+    "                             param_grid=param_grid,\n",
+    "                             num_epochs=250,\n",
+    "                             x_train=x_train,\n",
+    "                             y_train=y_train)\n",
+    "\n",
+    "htres = htun.print_tuning_results(cvresults, func)\n",
+    "htres.to_csv('n4_cvsrch.dat')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-11T06:11:12.169586Z",
+     "start_time": "2018-09-11T06:11:12.161932Z"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<AsyncResult: execute>"
+      ]
+     },
+     "execution_count": 13,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%px --target 3 --noblock\n",
+    "import os\n",
+    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n",
+    "func = scn.resnet\n",
+    "param_grid = dict(num_units=[5], \n",
+    "                  red_dim=[8,16],\n",
+    "                  batch_size=[32, 64],\n",
+    "                  ker_size=[3, 5, 7])\n",
+    "    \n",
+    "cvresults = htun.get_cv_grid(modelfunc=func, \n",
+    "                             param_grid=param_grid,\n",
+    "                             num_epochs=250,\n",
+    "                             x_train=x_train,\n",
+    "                             y_train=y_train)\n",
+    "\n",
+    "htres = htun.print_tuning_results(cvresults, func)\n",
+    "htres.to_csv('n5_cvsrch.dat')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-11T06:10:40.811578Z",
+     "start_time": "2018-09-11T06:10:40.804963Z"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<AsyncResult: execute>"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%px --target 4 --noblock\n",
+    "import os\n",
+    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+    "func = scn.resnet\n",
+    "param_grid = dict(num_units=[6], \n",
+    "                  red_dim=[8,16],\n",
+    "                  batch_size=[32, 64],\n",
+    "                  ker_size=[3, 5, 7])\n",
+    "    \n",
+    "cvresults = htun.get_cv_grid(modelfunc=func, \n",
+    "                             param_grid=param_grid,\n",
+    "                             num_epochs=250,\n",
+    "                             x_train=x_train,\n",
+    "                             y_train=y_train)\n",
+    "\n",
+    "htres = htun.print_tuning_results(cvresults, func)\n",
+    "htres.to_csv('n6_cvsrch.dat')"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
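
Each %%px cell in Untitled.ipynb pins its engine to one GPU by setting CUDA_VISIBLE_DEVICES before TensorFlow/Keras is imported, so the five grid-search slices (num_units = 2..6) train concurrently on two GPUs. The idiom in isolation, with the engine-to-GPU mapping mirroring the cells above:

    import os

    def pin_gpu(gpu_id):
        # order devices by PCI bus so numbering is stable across processes,
        # then expose exactly one device to this process
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    pin_gpu(0)   # engines 0, 2, 4 -> GPU 0; engines 1, 3 -> GPU 1
    # import keras / tensorflow only after this point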

BIN
__pycache__/hyptune.cpython-36.pyc


BIN
__pycache__/scnets.cpython-36.pyc


+ 264 - 0
conv_hyp.ipynb

@@ -0,0 +1,264 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-07T12:38:15.344220Z",
+     "start_time": "2018-09-07T12:38:15.340982Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# we will study the hyperparamter tuning of fully connected scatternet\n",
+    "\n",
+    "# first find optimal number of layers and neuron numbers\n",
+    "# second optimize the batch size and number of epochs for the best learned architecture\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-08T12:01:25.348660Z",
+     "start_time": "2018-09-08T12:01:25.309257Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "The autoreload extension is already loaded. To reload it, use:\n",
+      "  %reload_ext autoreload\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/ipyparallel/client/client.py:459: RuntimeWarning: \n",
+      "            Controller appears to be listening on localhost, but not on this machine.\n",
+      "            If this is true, you should specify Client(...,sshserver='you@Gamma')\n",
+      "            or instruct your controller to listen on an external IP.\n",
+      "  RuntimeWarning)\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[0, 1, 2, 3]"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%load_ext autoreload\n",
+    "\n",
+    "import ipyparallel as ipp\n",
+    "rc = ipp.Client()\n",
+    "rc.ids"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Loading the dataset here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-08T12:25:26.143831Z",
+     "start_time": "2018-09-08T12:25:26.137752Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "UsageError: Cell magic `%%px` not found.\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%px\n",
+    "\n",
+    "import numpy as np\n",
+    "\n",
+    "import h5py\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
+    "#now load this dataset \n",
+    "h5f = h5py.File('./datasets/s8_sio2tio2_v2.h5','r')\n",
+    "X = h5f['sizes'][:]\n",
+    "Y = h5f['spectrum'][:]\n",
+    "\n",
+    "#get the ranges of the loaded data\n",
+    "num_layers = X.shape[1]\n",
+    "num_lpoints = Y.shape[1]\n",
+    "size_max = np.amax(X)\n",
+    "size_min = np.amin(X)\n",
+    "size_av = 0.5*(size_max + size_min)\n",
+    "\n",
+    "#this information is not given in the dataset\n",
+    "lam_min = 300\n",
+    "lam_max = 1200\n",
+    "lams = np.linspace(lam_min, lam_max, num_lpoints)\n",
+    "\n",
+    "#create a train - test split of the dataset\n",
+    "x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.55, random_state=42)\n",
+    "\n",
+    "# normalize inputs \n",
+    "x_train = (x_train - 50)/20 \n",
+    "x_test = (x_test - 50)/20 \n",
+    "\n",
+    "print(\"Dataset has been loaded\")\n",
+    "print(\"x-train\", x_train.shape)\n",
+    "print(\"x-test \", x_test.shape)\n",
+    "print(\"y-train\", y_train.shape)\n",
+    "print(\"y-test \", y_test.shape)\n",
+    "\n",
+    "import warnings\n",
+    "warnings.filterwarnings('ignore')\n",
+    "\n",
+    "\n",
+    "import hyptune as htun\n",
+    "import scnets as scn\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### create models here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-08T12:09:43.998716Z",
+     "start_time": "2018-09-08T12:09:43.987026Z"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<AsyncResult: execute>"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%px --targets 0 --noblock\n",
+    "\n",
+    "%autoreload 2\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "# #resnet\n",
+    "func = scn.resnet\n",
+    "param_grid = dict(num_units=[2,3,4,5,6], \n",
+    "                  red_dim=[8,16],\n",
+    "                  batch_size=[32],\n",
+    "                  ker_size=[3, 5, 7])\n",
+    "    \n",
+    "cvresults = htun.get_cv_grid(modelfunc=func, \n",
+    "                             param_grid=param_grid,\n",
+    "                             num_epochs=250,\n",
+    "                             x_train=x_train,\n",
+    "                             y_train=y_train)\n",
+    "\n",
+    "htres = htun.print_tuning_results(cvresults, func)\n",
+    "print(htres.to_string(index=False))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-08T11:24:51.887905Z",
+     "start_time": "2018-09-08T11:24:51.878241Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "#%%px --targets 1 --noblock\n",
+    "\n",
+    "%autoreload 2\n",
+    "\n",
+    "\n",
+    "import hyptune as htun\n",
+    "import scnets as scn\n",
+    "\n",
+    "import os\n",
+    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+    "\n",
+    "# #resnet\n",
+    "func = scn.resnet\n",
+    "param_grid = dict(num_units=[2,3,4,5,6], \n",
+    "                  red_dim=[8,16],\n",
+    "                  batch_size=[64],\n",
+    "                  ker_size=[3, 5, 7])\n",
+    "    \n",
+    "cvresults = htun.get_cv_grid(modelfunc=func, \n",
+    "                             param_grid=param_grid,\n",
+    "                             num_epochs=250,\n",
+    "                             x_train=x_train,\n",
+    "                             y_train=y_train)\n",
+    "\n",
+    "htres = htun.print_tuning_results(cvresults, func)\n",
+    "print(htres.to_string(index=False))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
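
One failure mode is recorded in conv_hyp.ipynb above: the dataset cell died with "UsageError: Cell magic `%%px` not found" because it ran before an ipyparallel Client existed in that kernel; the px magics are only registered once a client view is activated. A likely fix, assuming that session state (not part of this commit):

    import ipyparallel as ipp

    rc = ipp.Client()
    rc[:].activate()   # registers %px, %%px and %autopx for this view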

+ 41 - 0
fulldset.py

@@ -0,0 +1,41 @@
+# generate the full SiO2/TiO2 multilayer-sphere dataset
+# (the generation loop is kept commented out below)
+import snlay
+import h5py
+import numpy as np
+from scattnlay import scattnlay
+import progressbar
+
+num_lpoints = 250
+lam_min = 300
+lam_max = 1200
+size_min = 30
+size_max = 70
+
+#dataset_size = 100000 
+
+## parameters of the dataset
+#num_layers = 8
+
+##make the materials list here
+#mats = [3, 4, 3, 4, 3, 4, 3, 4]       # 2 - silicon, silicon.dat,   1 - gold,  gold.dat
+#                                      # 3 - silica,  silica.dat,    4 - titania,  tio2.dat
+##generate a huge array and then reshape
+#dataset_X = np.random.randint(size_min,size_max+1,num_layers*dataset_size).astype(float).reshape(dataset_size, num_layers)
+#lams = np.linspace(lam_min, lam_max, num_lpoints)
+#dataset_Y = np.zeros((dataset_size,num_lpoints))
+
+#for ind in progressbar.progressbar(np.arange(dataset_size)):
+#    kr, m = snlay.make_xm(dataset_X[ind,:], mats, lams)
+#    terms, dataset_Y[ind,:], Qsca, Qabs, Qbk, Qpr, g, Albedo, S1, S2 = scattnlay(kr, m)
+
+
+#h5f = h5py.File('./datasets/full_sio2tio2.h5', 'w')
+#h5f.create_dataset('sizes', data=dataset_X)
+#h5f.create_dataset('spectrum', data=dataset_Y)
+#h5f.close()
+
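The commented-out block above records how the full dataset was generated. A runnable smoke-test version would look roughly like this; a sketch only: the make_xm/scattnlay call signatures are taken from the commented lines, and the output file name and small dataset_size are illustrative.

import numpy as np
import h5py
import progressbar
import snlay
from scattnlay import scattnlay

mats = [3, 4, 3, 4, 3, 4, 3, 4]    # alternating silica (3) / titania (4) shells
dataset_size, num_layers = 100, 8  # small run, just to exercise the loop
lams = np.linspace(lam_min, lam_max, num_lpoints)

sizes = np.random.randint(size_min, size_max + 1,
                          (dataset_size, num_layers)).astype(float)
spectra = np.zeros((dataset_size, num_lpoints))
for ind in progressbar.progressbar(np.arange(dataset_size)):
    kr, m = snlay.make_xm(sizes[ind, :], mats, lams)
    # the second return value is the spectrum stored in the dataset
    terms, spectra[ind, :], Qsca, Qabs, Qbk, Qpr, g, albedo, S1, S2 = scattnlay(kr, m)

with h5py.File('./datasets/smoke_test.h5', 'w') as h5f:
    h5f.create_dataset('sizes', data=sizes)
    h5f.create_dataset('spectrum', data=spectra)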

+ 130 - 349
fully_con_hyp_tuning.ipynb

@@ -21,14 +21,38 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {
     "ExecuteTime": {
-     "end_time": "2018-09-04T10:56:22.052050Z",
-     "start_time": "2018-09-04T10:56:21.638962Z"
+     "end_time": "2018-09-04T16:14:58.074796Z",
+     "start_time": "2018-09-04T16:14:57.660525Z"
     }
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Dataset has been loaded\n",
+      "x-train (44999, 8)\n",
+      "x-test  (55001, 8)\n",
+      "y-train (44999, 256)\n",
+      "y-test  (55001, 256)\n"
+     ]
+    }
+   ],
    "source": [
     "import numpy as np\n",
     "\n",
@@ -75,14 +99,48 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 21,
    "metadata": {
     "ExecuteTime": {
-     "end_time": "2018-09-04T08:41:45.424541Z",
-     "start_time": "2018-09-04T08:41:44.270792Z"
+     "end_time": "2018-09-06T07:21:58.310439Z",
+     "start_time": "2018-09-06T07:21:57.952080Z"
     }
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "_________________________________________________________________\n",
+      "Layer (type)                 Output Shape              Param #   \n",
+      "=================================================================\n",
+      "first (Dense)                (None, 256)               2304      \n",
+      "_________________________________________________________________\n",
+      "Reshape1 (Reshape)           (None, 4, 64)             0         \n",
+      "_________________________________________________________________\n",
+      "Up1 (UpSampling1D)           (None, 8, 64)             0         \n",
+      "_________________________________________________________________\n",
+      "Conv1 (Conv1D)               (None, 8, 64)             28736     \n",
+      "_________________________________________________________________\n",
+      "Conv2 (Conv1D)               (None, 8, 32)             14368     \n",
+      "_________________________________________________________________\n",
+      "Conv3 (Conv1D)               (None, 8, 32)             7200      \n",
+      "_________________________________________________________________\n",
+      "Conv4 (Conv1D)               (None, 8, 32)             7200      \n",
+      "_________________________________________________________________\n",
+      "Conv5 (Conv1D)               (None, 8, 32)             7200      \n",
+      "_________________________________________________________________\n",
+      "Conv6 (Conv1D)               (None, 8, 32)             7200      \n",
+      "_________________________________________________________________\n",
+      "flatten_105 (Flatten)        (None, 256)               0         \n",
+      "=================================================================\n",
+      "Total params: 74,208\n",
+      "Trainable params: 74,208\n",
+      "Non-trainable params: 0\n",
+      "_________________________________________________________________\n"
+     ]
+    }
+   ],
    "source": [
     "import scnets as scn\n",
     "from IPython.display import SVG\n",
@@ -91,11 +149,11 @@
     "#define and visualize the model here\n",
     "#model = scn.fullycon(num_layers, num_lpoints, 4, 500, 2)\n",
     "\n",
-    "model = scn.convprel(in_size=8, \n",
+    "model = scn.conv1dmodel(in_size=8, \n",
     "        out_size=256,\n",
     "        c1_nf=64,\n",
-    "        clayers=3,\n",
-    "        ker_size=3)\n",
+    "        clayers=5,\n",
+    "        ker_size=7)\n",
     "model.summary()\n",
     "#SVG(model_to_dot(model).create(prog='dot', format='svg'))\n"
    ]
@@ -113,7 +171,7 @@
    "source": [
     "x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, test_size=0.2, random_state=42)\n",
     "history = model.fit(x_t, y_t,\n",
-    "                    batch_size=64,\n",
+    "                    batch_size=32,\n",
     "                    epochs=250, \n",
     "                    verbose=1,\n",
     "                    validation_data=(x_v, y_v))\n"
@@ -138,13 +196,23 @@
    "execution_count": null,
    "metadata": {
     "ExecuteTime": {
-     "end_time": "2018-09-04T15:49:08.556840Z",
-     "start_time": "2018-09-04T15:08:06.775220Z"
+     "start_time": "2018-09-05T05:27:49.316Z"
     },
     "scrolled": false
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Fitting 3 folds for each of 18 candidates, totalling 54 fits\n"
+     ]
+    }
+   ],
    "source": [
+    "\n",
+    "\n",
+    "\n",
     "import warnings\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
@@ -152,7 +220,7 @@
     "\n",
     "def my_custom_score_func(ground_truth, predictions):\n",
     "    diff = np.abs(ground_truth - predictions)/np.abs(ground_truth)\n",
-    "    return np.mean(diff)\n",
+    "    return -100*np.mean(diff)\n",
     "\n",
     "\n",
     "\n",
@@ -175,13 +243,13 @@
     "\n",
     "\n",
     "\n",
-    "param_grid = dict(ker_size=[3, 5],\n",
+    "param_grid = dict(ker_size=[3, 5, 7],\n",
     "                  clayers=[3,4,5],\n",
     "                  batch_size=[32,64])                                  \n",
     "grid = GridSearchCV(estimator=model, \n",
     "                    param_grid=param_grid, \n",
     "                    n_jobs=1, \n",
-    "                    scoring='explained_variance',\n",
+    "                    scoring=my_score,\n",
     "                    verbose=1)\n",
     "grid_result = grid.fit(x_train, y_train)\n",
     "\n",
@@ -190,346 +258,59 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 23,
    "metadata": {
     "ExecuteTime": {
-     "end_time": "2018-09-04T14:25:50.558503Z",
-     "start_time": "2018-09-04T14:25:50.539227Z"
+     "end_time": "2018-09-06T11:13:09.815660Z",
+     "start_time": "2018-09-06T11:13:09.803034Z"
     }
    },
-   "outputs": [],
-   "source": [
-    "grid_result.cv_results_"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-09-04T14:26:16.796674Z",
-     "start_time": "2018-09-04T14:26:16.790511Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "grid_result.best_params_"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-09-04T15:01:18.552044Z",
-     "start_time": "2018-09-04T15:01:18.538960Z"
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[0.61959961 0.65246116 0.66099284 0.6769027  0.67712374 0.69844051\n",
+      " 0.70291055 0.70903711 0.7103426  0.71306676 0.7421975  0.74365013\n",
+      " 0.80096641 0.83683735 0.83717051 0.87958107 0.90866163 0.93305465]\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[{'batch_size': 32, 'clayers': 5, 'ker_size': 7},\n",
+       " {'batch_size': 32, 'clayers': 3, 'ker_size': 7},\n",
+       " {'batch_size': 32, 'clayers': 4, 'ker_size': 7},\n",
+       " {'batch_size': 32, 'clayers': 3, 'ker_size': 5},\n",
+       " {'batch_size': 64, 'clayers': 5, 'ker_size': 5},\n",
+       " {'batch_size': 64, 'clayers': 3, 'ker_size': 7},\n",
+       " {'batch_size': 32, 'clayers': 4, 'ker_size': 5},\n",
+       " {'batch_size': 64, 'clayers': 4, 'ker_size': 7},\n",
+       " {'batch_size': 64, 'clayers': 5, 'ker_size': 7},\n",
+       " {'batch_size': 64, 'clayers': 3, 'ker_size': 5},\n",
+       " {'batch_size': 32, 'clayers': 5, 'ker_size': 5},\n",
+       " {'batch_size': 64, 'clayers': 4, 'ker_size': 5},\n",
+       " {'batch_size': 64, 'clayers': 4, 'ker_size': 3},\n",
+       " {'batch_size': 64, 'clayers': 5, 'ker_size': 3},\n",
+       " {'batch_size': 32, 'clayers': 3, 'ker_size': 3},\n",
+       " {'batch_size': 64, 'clayers': 3, 'ker_size': 3},\n",
+       " {'batch_size': 32, 'clayers': 4, 'ker_size': 3},\n",
+       " {'batch_size': 32, 'clayers': 5, 'ker_size': 3}]"
+      ]
+     },
+     "execution_count": 23,
+     "metadata": {},
+     "output_type": "execute_result"
     }
-   },
-   "outputs": [],
+   ],
    "source": [
     "bestidx = np.argsort(grid_result.cv_results_['mean_test_score'])\n",
-    "print(idx)\n",
-    "print(np.flip(idx))\n",
+    "print(grid_result.cv_results_['mean_test_score'][bestidx])\n",
     "parlist = grid_result.cv_results_['params']\n",
+    "runtlist = grid_result.cv_results_['mean_fit_time']\n",
     "bestlist = [parlist[indx] for indx in bestidx]\n",
-    "bestlist\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "'mean_fit_time': array([ 440.59602499,  481.73871398,  477.10135643,  504.1547633 ,\n",
-    "         488.27373465,  564.76856009,  571.80046884,  569.00620119,\n",
-    "         650.78964178,  740.00103672,  597.98015889,  659.03726633,\n",
-    "         476.14166268,  508.75317804,  505.10563159,  556.87875859,\n",
-    "         556.58007542,  542.48226706,  591.323385  ,  616.57070398,\n",
-    "         658.64879243,  891.65342418, 1055.23733377,  993.75831501,\n",
-    "         647.80959813,  629.8490928 ,  516.74331371,  787.65950712,\n",
-    "         635.2311732 ,  545.72071338,  904.7078863 ,  685.55618747,\n",
-    "         530.5135781 ,  815.14902997,  821.7310555 ,  685.34639025,\n",
-    "         240.90511258,  246.83409182,  259.09067996,  262.85167106,\n",
-    "         228.2099692 ,  392.25437156,  374.78024157,  452.21288816,\n",
-    "         459.90044618,  495.74129097,  501.12204981,  368.91637723,\n",
-    "         240.11106253,  237.93171946,  261.74841015,  265.67413298,\n",
-    "         274.72749496,  378.51081745,  436.21683431,  241.44164371,\n",
-    "         274.60629002,  330.47249166,  394.98302094,  318.71901139,\n",
-    "         237.12692181,  248.02535923,  254.61501988,  243.65626915,\n",
-    "         298.34629599,  273.75161084,  318.28928526,  312.33878072,\n",
-    "         309.61013468,  335.33733678,  316.16733519,  284.3205506 ]),\n",
-    "\n",
-    " 'params': [{'batch_size': 32, 'c1_nf': 32, 'clayers': 1, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 1, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 1, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 2, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 2, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 2, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 3, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 3, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 3, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 4, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 4, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 32, 'clayers': 4, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 1, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 1, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 1, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 2, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 2, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 2, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 3, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 3, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 3, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 4, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 4, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 64, 'clayers': 4, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 1, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 1, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 1, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 2, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 2, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 2, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 3, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 3, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 3, 'ker_size': 7},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 4, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 4, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'c1_nf': 96, 'clayers': 4, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 1, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 1, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 1, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 2, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 2, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 2, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 3, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 3, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 3, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 4, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 4, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 32, 'clayers': 4, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 1, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 1, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 1, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 2, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 2, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 2, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 3, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 3, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 3, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 4, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 4, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 64, 'clayers': 4, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 1, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 1, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 1, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 2, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 2, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 2, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 3, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 3, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 3, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 4, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 4, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'c1_nf': 96, 'clayers': 4, 'ker_size': 7}],\n",
-    "\n",
-    "\n",
-    " 'mean_test_score': array([0.00235561, 0.00175559, 0.00191115, 0.00255561, 0.00228894,\n",
-    "        0.00233339, 0.00240005, 0.00260006, 0.00262228, 0.00222227,\n",
-    "        0.00222227, 0.00235561, 0.00191115, 0.0021556 , 0.00242228,\n",
-    "        0.00217783, 0.00217783, 0.00233339, 0.00231116, 0.00202227,\n",
-    "        0.00206671, 0.00251117, 0.00237783, 0.00253339, 0.00197782,\n",
-    "        0.00222227, 0.00220005, 0.0019556 , 0.00208894, 0.00240005,\n",
-    "        0.00220005, 0.00204449, 0.00222227, 0.00228894, 0.00231116,\n",
-    "        0.00246672, 0.00200004, 0.00182226, 0.00164448, 0.00197782,\n",
-    "        0.0026445 , 0.00208894, 0.00222227, 0.00248894, 0.00217783,\n",
-    "        0.00208894, 0.00253339, 0.0024445 , 0.00204449, 0.00188893,\n",
-    "        0.00197782, 0.00233339, 0.00231116, 0.00231116, 0.00233339,\n",
-    "        0.0021556 , 0.00231116, 0.0021556 , 0.00220005, 0.00233339,\n",
-    "        0.0021556 , 0.00220005, 0.00251117, 0.00191115, 0.00197782,\n",
-    "        0.0021556 , 0.00224449, 0.00231116, 0.00228894, 0.00220005,\n",
-    "        0.00235561, 0.00211116]),\n",
-    " 'std_test_score': array([5.45313915e-04, 1.36982222e-04, 5.71830319e-04, 3.09583745e-04,\n",
-    "        4.63034070e-04, 1.88601629e-04, 3.57010463e-04, 2.49388000e-04,\n",
-    "        2.06134688e-04, 4.75566024e-04, 1.74937538e-04, 4.01290372e-04,\n",
-    "        2.06146739e-04, 5.14564491e-04, 3.28118192e-04, 3.09506255e-04,\n",
-    "        3.45776617e-04, 5.65777616e-04, 6.28186970e-05, 2.45451156e-04,\n",
-    "        3.03064138e-04, 4.69393085e-04, 3.70577110e-04, 2.37355340e-04,\n",
-    "        5.14534288e-04, 2.99865023e-04, 1.08929121e-04, 3.28165699e-04,\n",
-    "        1.91227745e-04, 1.44095217e-04, 1.96333991e-04, 1.13377551e-04,\n",
-    "        6.28899342e-05, 4.93994775e-04, 5.05855189e-04, 4.25227642e-04,\n",
-    "        5.25013167e-04, 1.66308696e-04, 1.91169192e-04, 7.89511257e-04,\n",
-    "        3.09609747e-04, 2.20000709e-04, 1.36980546e-04, 2.99872491e-04,\n",
-    "        2.06155046e-04, 3.70481852e-04, 1.88604774e-04, 2.74052629e-04,\n",
-    "        4.15813294e-04, 2.99823569e-04, 2.06148816e-04, 1.44067451e-04,\n",
-    "        3.50039135e-04, 2.79388549e-04, 3.81084580e-04, 3.62491814e-04,\n",
-    "        2.26678399e-04, 4.08631950e-04, 4.35536821e-04, 2.49505205e-04,\n",
-    "        4.53303035e-04, 1.44064707e-04, 5.77012935e-04, 8.31603039e-05,\n",
-    "        5.34338425e-04, 2.79402571e-04, 2.45459743e-04, 1.36996891e-04,\n",
-    "        3.86260180e-04, 1.88498951e-04, 3.09579276e-04, 3.70571422e-04]),\n",
-    " 'rank_test_score': array([16, 71, 66,  4, 30, 19, 13,  3,  2, 34, 34, 16, 66, 47, 12, 44, 44,\n",
-    "        19, 24, 59, 56,  7, 15,  5, 61, 34, 39, 65, 53, 13, 39, 57, 34, 30,\n",
-    "        24, 10, 60, 70, 72, 61,  1, 53, 34,  9, 44, 53,  5, 11, 57, 69, 61,\n",
-    "        19, 24, 24, 19, 47, 24, 47, 39, 19, 47, 39,  7, 66, 61, 47, 33, 24,\n",
-    "        30, 39, 16, 52], dtype=int32),\n",
-    "\n",
-    "\n",
-    " 'mean_train_score': array([0.00231116, 0.00187782, 0.0018556 , 0.00231116, 0.00222227,\n",
-    "        0.00217783, 0.00228894, 0.00236672, 0.00232227, 0.00252228,\n",
-    "        0.00213338, 0.00241117, 0.00214449, 0.00204449, 0.00235561,\n",
-    "        0.00205561, 0.00217783, 0.00238894, 0.00213338, 0.00230005,\n",
-    "        0.0021556 , 0.0023445 , 0.00238894, 0.00236672, 0.00203338,\n",
-    "        0.00208894, 0.00232228, 0.00203338, 0.00258895, 0.00216671,\n",
-    "        0.00221117, 0.00217783, 0.0022556 , 0.00224449, 0.00246672,\n",
-    "        0.00240006, 0.00187782, 0.00183338, 0.00153337, 0.00223338,\n",
-    "        0.00186671, 0.00204449, 0.00218894, 0.00227783, 0.00203338,\n",
-    "        0.00257784, 0.00235561, 0.0023445 , 0.00200004, 0.00210005,\n",
-    "        0.00204449, 0.00247784, 0.00233338, 0.00230005, 0.00223338,\n",
-    "        0.00222227, 0.00228894, 0.0023445 , 0.00228894, 0.00248895,\n",
-    "        0.00188893, 0.00211116, 0.00233339, 0.00236672, 0.00213338,\n",
-    "        0.00204449, 0.00248895, 0.00238895, 0.00233338, 0.00231116,\n",
-    "        0.0023445 , 0.00241116]),\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
-    "{'mean_fit_time': array([ 893.33654523,  898.53078222, 1119.14394736, 1130.1775128 ,\n",
-    "         956.51246222,  964.45365715, 1209.3984166 , 1582.91039157,\n",
-    "        1394.26560704, 1616.26630108, 1266.47200227, 1116.83488099,\n",
-    "        1205.42738708, 1201.92515103, 1210.92550143]),\n",
-    " 'std_fit_time': array([  3.08891285,   6.81113186, 212.20371026, 238.93357922,\n",
-    "          6.97112622,  18.87349827, 223.00445851,  57.7875855 ,\n",
-    "        100.70476936,  43.95356933, 134.83849082,  11.29690679,\n",
-    "          5.42330543,   6.19267952,   4.92641743]),\n",
-    " 'params': [{'N_hidden': 1, 'N_neurons': 250},\n",
-    "  {'N_hidden': 1, 'N_neurons': 500},\n",
-    "  {'N_hidden': 1, 'N_neurons': 1000},\n",
-    "  {'N_hidden': 2, 'N_neurons': 250},\n",
-    "  {'N_hidden': 2, 'N_neurons': 500},\n",
-    "  {'N_hidden': 2, 'N_neurons': 1000},\n",
-    "  {'N_hidden': 3, 'N_neurons': 250},\n",
-    "  {'N_hidden': 3, 'N_neurons': 500},\n",
-    "  {'N_hidden': 3, 'N_neurons': 1000},\n",
-    "  {'N_hidden': 4, 'N_neurons': 250},\n",
-    "  {'N_hidden': 4, 'N_neurons': 500},\n",
-    "  {'N_hidden': 4, 'N_neurons': 1000},\n",
-    "  {'N_hidden': 5, 'N_neurons': 250},\n",
-    "  {'N_hidden': 5, 'N_neurons': 500},\n",
-    "  {'N_hidden': 5, 'N_neurons': 1000}],\n",
-    " 'split0_test_score': array([0.00235, 0.00225, 0.0025 , 0.0024 , 0.0026 , 0.0021 , 0.00165,\n",
-    "        0.0023 , 0.00255, 0.00255, 0.0024 , 0.0027 , 0.0021 , 0.00225,\n",
-    "        0.0022 ]),\n",
-    " 'split1_test_score': array([0.0024 , 0.00225, 0.0022 , 0.0022 , 0.00235, 0.0022 , 0.0021 ,\n",
-    "        0.00195, 0.00215, 0.0022 , 0.00185, 0.00195, 0.002  , 0.00195,\n",
-    "        0.0021 ]),\n",
-    " 'split2_test_score': array([0.00255, 0.00315, 0.00275, 0.00295, 0.00355, 0.0032 , 0.00275,\n",
-    "        0.0031 , 0.0028 , 0.00305, 0.00305, 0.00355, 0.00275, 0.00285,\n",
-    "        0.00305]),\n",
-    " 'mean_test_score': array([0.00243333, 0.00255   , 0.00248333, 0.00251667, 0.00283333,\n",
-    "        0.0025    , 0.00216667, 0.00245   , 0.0025    , 0.0026    ,\n",
-    "        0.00243333, 0.00273333, 0.00228333, 0.00235   , 0.00245   ]),\n",
-    " 'std_test_score': array([8.49836586e-05, 4.24264069e-04, 2.24845626e-04, 3.17104960e-04,\n",
-    "        5.16935414e-04, 4.96655481e-04, 4.51540573e-04, 4.81317636e-04,\n",
-    "        2.67706307e-04, 3.48807492e-04, 4.90464632e-04, 6.53622385e-04,\n",
-    "        3.32498956e-04, 3.74165739e-04, 4.26223728e-04]),\n",
-    " 'rank_test_score': array([11,  4,  8,  5,  1,  6, 15,  9,  6,  3, 11,  2, 14, 13,  9],\n",
-    "       dtype=int32),\n",
-    " 'split0_train_score': array([0.00255 , 0.0024  , 0.0023  , 0.00255 , 0.00255 , 0.002375,\n",
-    "        0.002125, 0.002625, 0.0025  , 0.002775, 0.002625, 0.0027  ,\n",
-    "        0.0023  , 0.00245 , 0.002275]),\n",
-    " 'split1_train_score': array([0.002975, 0.0025  , 0.00255 , 0.0028  , 0.0029  , 0.002475,\n",
-    "        0.002575, 0.0027  , 0.002575, 0.002375, 0.002475, 0.002475,\n",
-    "        0.002325, 0.002425, 0.002375]),\n",
-    " 'split2_train_score': array([0.00215 , 0.0022  , 0.00215 , 0.002375, 0.00205 , 0.002175,\n",
-    "        0.00215 , 0.0022  , 0.002   , 0.00235 , 0.00235 , 0.002525,\n",
-    "        0.00215 , 0.002175, 0.002175]),\n",
-    " 'mean_train_score': array([0.00255833, 0.00236667, 0.00233333, 0.002575  , 0.0025    ,\n",
-    "        0.00234167, 0.00228333, 0.00250833, 0.00235833, 0.0025    ,\n",
-    "        0.00248333, 0.00256667, 0.00225833, 0.00235   , 0.002275  ]),\n",
-    " 'std_train_score': array([3.36856382e-04, 1.24721913e-04, 1.64991582e-04, 1.74403746e-04,\n",
-    "        3.48807492e-04, 1.24721913e-04, 2.06491862e-04, 2.20164080e-04,\n",
-    "        2.55223214e-04, 1.94722024e-04, 1.12422813e-04, 9.64653075e-05,\n",
-    "        7.72801541e-05, 1.24163870e-04, 8.16496581e-05])}\n",
-    "        \n",
-    " \n",
-    " {'mean_fit_time': array([  685.01906315,  1809.28454868,   336.60541034,   878.23016135]),\n",
-    " 'mean_score_time': array([ 1.38006322,  1.27389534,  0.6934317 ,  0.69225407]),\n",
-    " 'mean_test_score': array([ 0.00241667,  0.00251667,  0.00243333,  0.00261667]),\n",
-    " 'mean_train_score': array([ 0.00245833,  0.00236667,  0.00248333,  0.00253333]),\n",
-    " 'param_batch_size': masked_array(data = [32 32 64 64],\n",
-    "              mask = [False False False False],\n",
-    "        fill_value = ?),\n",
-    " 'param_epochs': masked_array(data = [200 500 200 500],\n",
-    "              mask = [False False False False],\n",
-    "        fill_value = ?),\n",
-    " 'params': ({'batch_size': 32, 'epochs': 200},\n",
-    "  {'batch_size': 32, 'epochs': 500},\n",
-    "  {'batch_size': 64, 'epochs': 200},\n",
-    "  {'batch_size': 64, 'epochs': 500}),\n",
-    " 'rank_test_score': array([4, 2, 3, 1], dtype=int32),\n",
-    " 'split0_test_score': array([ 0.0021 ,  0.00225,  0.00215,  0.00225]),\n",
-    " 'split0_train_score': array([ 0.00235 ,  0.0023  ,  0.002625,  0.002575]),\n",
-    " 'split1_test_score': array([ 0.00225,  0.00225,  0.00215,  0.00235]),\n",
-    " 'split1_train_score': array([ 0.002675,  0.002725,  0.002675,  0.002825]),\n",
-    " 'split2_test_score': array([ 0.0029 ,  0.00305,  0.003  ,  0.00325]),\n",
-    " 'split2_train_score': array([ 0.00235 ,  0.002075,  0.00215 ,  0.0022  ]),\n",
-    " 'std_fit_time': array([  27.85582158,  121.41697465,    1.58335506,   11.64839192]),\n",
-    " 'std_score_time': array([ 0.01602076,  0.06291871,  0.03384719,  0.05541393]),\n",
-    " 'std_test_score': array([ 0.00034721,  0.00037712,  0.00040069,  0.00044969]),\n",
-    " 'std_train_score': array([ 0.00015321,  0.00026952,  0.00023658,  0.00025685])}\n",
-    "        \n",
-    "        \n",
-    "        'mean_fit_time': array([1236.77363722, 1263.8373781 , 1283.07971772,  617.23694984,\n",
-    "         644.64875857,  630.75466394]),\n",
-    " 'std_fit_time': array([15.23634435,  1.04774932, 70.7173362 , 19.87266061,  3.13235316,\n",
-    "        19.13357172]),\n",
-    " 'mean_score_time': array([1.9509182 , 2.09144211, 2.07234033, 1.05850196, 1.09700545,\n",
-    "        1.07024908]),\n",
-    " 'std_score_time': array([0.09494565, 0.09207867, 0.09335411, 0.04954752, 0.04209056,\n",
-    "        0.05320864]),\n",
-    " 'param_batch_size': masked_array(data=[32, 32, 32, 64, 64, 64],\n",
-    "              mask=[False, False, False, False, False, False],\n",
-    "        fill_value='?',\n",
-    "             dtype=object),\n",
-    " 'param_ker_size': masked_array(data=[3, 5, 7, 3, 5, 7],\n",
-    "              mask=[False, False, False, False, False, False],\n",
-    "        fill_value='?',\n",
-    "             dtype=object),\n",
-    " 'params': [{'batch_size': 32, 'ker_size': 3},\n",
-    "  {'batch_size': 32, 'ker_size': 5},\n",
-    "  {'batch_size': 32, 'ker_size': 7},\n",
-    "  {'batch_size': 64, 'ker_size': 3},\n",
-    "  {'batch_size': 64, 'ker_size': 5},\n",
-    "  {'batch_size': 64, 'ker_size': 7}],\n",
-    " 'split0_test_score': array([0.00225, 0.0017 , 0.0027 , 0.00225, 0.00205, 0.0024 ]),\n",
-    " 'split1_test_score': array([0.0021 , 0.0023 , 0.0023 , 0.0019 , 0.002  , 0.00165]),\n",
-    " 'split2_test_score': array([0.00325, 0.00215, 0.0021 , 0.00245, 0.0024 , 0.0024 ]),\n",
-    " 'mean_test_score': array([0.00253333, 0.00205   , 0.00236667, 0.0022    , 0.00215   ,\n",
-    "        0.00215   ]),\n",
-    " 'std_test_score': array([0.00051045, 0.00025495, 0.00024944, 0.0002273 , 0.00017795,\n",
-    "        0.00035355]),\n",
-    " 'rank_test_score': array([1, 6, 2, 3, 4, 4], dtype=int32),\n",
-    " 'split0_train_score': array([0.001875, 0.001625, 0.002525, 0.002175, 0.0019  , 0.002275]),\n",
-    " 'split1_train_score': array([0.0027  , 0.00275 , 0.00245 , 0.0022  , 0.002125, 0.002475]),\n",
-    " 'split2_train_score': array([0.0024  , 0.00205 , 0.0019  , 0.0021  , 0.00225 , 0.001775]),\n",
-    " 'mean_train_score': array([0.002325  , 0.00214167, 0.00229167, 0.00215833, 0.00209167,\n",
-    "        0.002175  ]),\n",
-    " 'std_train_score': array([3.40954542e-04, 4.63830668e-04, 2.78637558e-04, 4.24918293e-05,\n",
-    "        1.44817893e-04, 2.94392029e-04])}\n",
-    "        "
+    "runlist = [runtlist[indx]/60.0 for indx in bestidx]\n",
+    "bestlist"
    ]
   },
   {

+ 53 - 0
hyptune.py

@@ -0,0 +1,53 @@
+from sklearn.model_selection import GridSearchCV
+from keras.models import Sequential
+from keras.layers import Dense
+from keras.wrappers.scikit_learn import KerasRegressor
+import scnets as scn
+from sklearn.metrics import make_scorer
+import numpy as np
+import pandas as pd
+
+def print_tuning_results(cvresults, modelfunc):
+    """Tabulate a GridSearchCV cv_results_ dict, best model (lowest MRE) first."""
+    pd.set_option('precision', 2)
+
+    # mean_test_score holds -MRE (sklearn maximizes scores), so sort
+    # descending and flip the sign back to report a positive MRE(%)
+    bestidx = np.argsort(cvresults['mean_test_score'])[::-1]
+    scorelist = -cvresults['mean_test_score'][bestidx]
+    parlist = cvresults['params']
+    # mean fit times in whole minutes, reordered to match the sorted rows
+    runtlist = ((1/60.0)*cvresults['mean_fit_time']).astype('int64')[bestidx]
+    bestlist = [parlist[indx] for indx in bestidx]
+
+    # rebuild each candidate model once to count its parameters
+    par_count = []
+    for elem in bestlist:
+        model = modelfunc(**elem)
+        par_count.append(model.count_params())
+
+    parkeylist = list(bestlist[0])
+    columns = parkeylist + ['MRE(%)', 'Total Params', 'mean_fit_time']
+
+    df = pd.DataFrame(columns=columns)
+    for colno, key in enumerate(parkeylist):
+        df[columns[colno]] = [elem[key] for elem in bestlist]
+    df['MRE(%)'] = scorelist
+    df['Total Params'] = par_count
+    df['mean_fit_time'] = runtlist
+
+    return df
+
+# Mean relative error (MRE) in percent, negated so that larger values
+# really are better when sklearn compares candidate scores
+def mre_score_func(ground_truth, predictions):
+    diff = np.abs(ground_truth - predictions)/np.abs(ground_truth)
+    return -100*np.mean(diff)
+
+def get_cv_grid(modelfunc, param_grid, num_epochs, x_train, y_train):
+    # the score function already returns the negated error, so greater really
+    # is better here; greater_is_better=False would flip the sign a second
+    # time and make GridSearchCV prefer the worst candidate on refit
+    mre_score = make_scorer(mre_score_func, greater_is_better=True)
+    # build the sklearn-compatible estimator around the Keras builder
+    model = KerasRegressor(build_fn=modelfunc, 
+                           epochs=num_epochs,
+                           verbose=0)
+    grid = GridSearchCV(estimator=model, 
+                        param_grid=param_grid, 
+                        n_jobs=1, 
+                        scoring=mre_score,
+                        verbose=1)
+    grid_result = grid.fit(x_train, y_train)
+    return grid_result.cv_results_
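The notebooks drive this pair of helpers as below. A sketch only: x_train and y_train are assumed to be loaded and normalized as in the dataset cells, and the small grid and epoch count are illustrative.

import hyptune as htun
import scnets as scn

param_grid = dict(ker_size=[3, 5],
                  clayers=[3, 4],
                  batch_size=[32])

cvresults = htun.get_cv_grid(modelfunc=scn.conv1dmodel,
                             param_grid=param_grid,
                             num_epochs=50,
                             x_train=x_train,
                             y_train=y_train)

htres = htun.print_tuning_results(cvresults, scn.conv1dmodel)
print(htres.to_string(index=False))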

+ 359 - 0
lkyrel_hyp.ipynb

@@ -0,0 +1,359 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# we will study the hyperparamter tuning of fully connected scatternet\n",
+    "\n",
+    "# first find optimal number of layers and neuron numbers\n",
+    "# second optimize the batch size and number of epochs for the best learned architecture\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Loading the dataset here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T17:53:30.640173Z",
+     "start_time": "2018-09-04T17:53:30.198023Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Dataset has been loaded\n",
+      "x-train (44999, 8)\n",
+      "x-test  (55001, 8)\n",
+      "y-train (44999, 256)\n",
+      "y-test  (55001, 256)\n"
+     ]
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "import h5py\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
+    "#now load this dataset \n",
+    "h5f = h5py.File('./datasets/s8_sio2tio2_v2.h5','r')\n",
+    "X = h5f['sizes'][:]\n",
+    "Y = h5f['spectrum'][:]\n",
+    "\n",
+    "#get the ranges of the loaded data\n",
+    "num_layers = X.shape[1]\n",
+    "num_lpoints = Y.shape[1]\n",
+    "size_max = np.amax(X)\n",
+    "size_min = np.amin(X)\n",
+    "size_av = 0.5*(size_max + size_min)\n",
+    "\n",
+    "#this information is not given in the dataset\n",
+    "lam_min = 300\n",
+    "lam_max = 1200\n",
+    "lams = np.linspace(lam_min, lam_max, num_lpoints)\n",
+    "\n",
+    "#create a train - test split of the dataset\n",
+    "x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.55, random_state=42)\n",
+    "\n",
+    "# normalize inputs \n",
+    "x_train = (x_train - 50)/20 \n",
+    "x_test = (x_test - 50)/20 \n",
+    "\n",
+    "print(\"Dataset has been loaded\")\n",
+    "print(\"x-train\", x_train.shape)\n",
+    "print(\"x-test \", x_test.shape)\n",
+    "print(\"y-train\", y_train.shape)\n",
+    "print(\"y-test \", y_test.shape)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### create models here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 34,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-06T07:20:23.347996Z",
+     "start_time": "2018-09-06T07:20:22.979988Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "_________________________________________________________________\n",
+      "Layer (type)                 Output Shape              Param #   \n",
+      "=================================================================\n",
+      "first (Dense)                (None, 256)               2304      \n",
+      "_________________________________________________________________\n",
+      "p_re_lu_964 (PReLU)          (None, 256)               256       \n",
+      "_________________________________________________________________\n",
+      "Reshape1 (Reshape)           (None, 4, 64)             0         \n",
+      "_________________________________________________________________\n",
+      "Up1 (UpSampling1D)           (None, 8, 64)             0         \n",
+      "_________________________________________________________________\n",
+      "Conv1 (Conv1D)               (None, 8, 64)             28736     \n",
+      "_________________________________________________________________\n",
+      "p_re_lu_965 (PReLU)          (None, 8, 64)             512       \n",
+      "_________________________________________________________________\n",
+      "Conv2 (Conv1D)               (None, 8, 32)             14368     \n",
+      "_________________________________________________________________\n",
+      "p_re_lu_966 (PReLU)          (None, 8, 32)             256       \n",
+      "_________________________________________________________________\n",
+      "Conv3 (Conv1D)               (None, 8, 32)             7200      \n",
+      "_________________________________________________________________\n",
+      "p_re_lu_967 (PReLU)          (None, 8, 32)             256       \n",
+      "_________________________________________________________________\n",
+      "Conv4 (Conv1D)               (None, 8, 32)             7200      \n",
+      "_________________________________________________________________\n",
+      "p_re_lu_968 (PReLU)          (None, 8, 32)             256       \n",
+      "_________________________________________________________________\n",
+      "Conv5 (Conv1D)               (None, 8, 32)             7200      \n",
+      "_________________________________________________________________\n",
+      "p_re_lu_969 (PReLU)          (None, 8, 32)             256       \n",
+      "_________________________________________________________________\n",
+      "flatten_168 (Flatten)        (None, 256)               0         \n",
+      "=================================================================\n",
+      "Total params: 68,800\n",
+      "Trainable params: 68,800\n",
+      "Non-trainable params: 0\n",
+      "_________________________________________________________________\n"
+     ]
+    }
+   ],
+   "source": [
+    "import scnets as scn\n",
+    "from IPython.display import SVG\n",
+    "from keras.utils.vis_utils import model_to_dot\n",
+    "\n",
+    "#define and visualize the model here\n",
+    "#model = scn.fullycon(num_layers, num_lpoints, 4, 500, 2)\n",
+    "\n",
+    "model = scn.convprel(in_size=8, \n",
+    "        out_size=256,\n",
+    "        c1_nf=64,\n",
+    "        clayers=4,\n",
+    "        ker_size=7)\n",
+    "model.summary()\n",
+    "#SVG(model_to_dot(model).create(prog='dot', format='svg'))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T08:53:28.967090Z",
+     "start_time": "2018-09-04T08:42:58.990636Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, test_size=0.2, random_state=42)\n",
+    "history = model.fit(x_t, y_t,\n",
+    "                    batch_size=64,\n",
+    "                    epochs=250, \n",
+    "                    verbose=1,\n",
+    "                    validation_data=(x_v, y_v))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-04T09:13:20.325358Z",
+     "start_time": "2018-09-04T09:13:20.044281Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "scn.plot_training_history(history, 64*2.56)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "start_time": "2018-09-05T05:27:22.456Z"
+    },
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Fitting 3 folds for each of 18 candidates, totalling 54 fits\n"
+     ]
+    }
+   ],
+   "source": [
+    "from keras import backend as K\n",
+    "\n",
+    "\n",
+    "import warnings\n",
+    "warnings.filterwarnings('ignore')\n",
+    "\n",
+    "from sklearn.metrics import fbeta_score, make_scorer\n",
+    "\n",
+    "def my_custom_score_func(ground_truth, predictions):\n",
+    "    diff = np.abs(ground_truth - predictions)/np.abs(ground_truth)\n",
+    "    return -100*np.mean(diff)\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "from sklearn.model_selection import GridSearchCV\n",
+    "from keras.models import Sequential\n",
+    "from keras.layers import Dense\n",
+    "from keras.wrappers.scikit_learn import KerasRegressor\n",
+    "import scnets as scn\n",
+    "#model = KerasClassifier(build_fn=scn.fullycon, in_size=8, out_size=250, N_gpus=1, epochs=500, verbose=0)\n",
+    "\n",
+    "model = KerasRegressor(build_fn=scn.convprel, \n",
+    "                        in_size=8, \n",
+    "                        out_size=256, \n",
+    "                        c1_nf=64,\n",
+    "                        clayers=3,\n",
+    "                        ker_size=3,\n",
+    "                        epochs=250, \n",
+    "                        verbose=0)\n",
+    "my_score = make_scorer(my_custom_score_func, greater_is_better=False)\n",
+    "\n",
+    "\n",
+    "\n",
+    "param_grid = dict(ker_size=[3, 5, 7],\n",
+    "                  clayers=[3,4,5],\n",
+    "                  batch_size=[32,64])                                  \n",
+    "\n",
+    "#param_grid = dict(ker_size=[3])       \n",
+    "\n",
+    "grid = GridSearchCV(estimator=model, \n",
+    "                    param_grid=param_grid, \n",
+    "                    n_jobs=1, \n",
+    "                    scoring=my_score,\n",
+    "                    verbose=1)\n",
+    "grid_result = grid.fit(x_train, y_train)\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 36,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-06T11:17:21.163273Z",
+     "start_time": "2018-09-06T11:17:21.148700Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[0.53893412 0.56550794 0.56970085 0.57164532 0.57234211 0.58644457\n",
+      " 0.59618878 0.59918908 0.60326499 0.6071571  0.61401865 0.6255378\n",
+      " 0.64548925 0.65486097 0.67364895 0.68902212 0.71184671 0.71246331]\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[{'batch_size': 32, 'clayers': 4, 'ker_size': 7},\n",
+       " {'batch_size': 64, 'clayers': 5, 'ker_size': 7},\n",
+       " {'batch_size': 32, 'clayers': 3, 'ker_size': 7},\n",
+       " {'batch_size': 64, 'clayers': 3, 'ker_size': 7},\n",
+       " {'batch_size': 32, 'clayers': 4, 'ker_size': 5},\n",
+       " {'batch_size': 64, 'clayers': 4, 'ker_size': 5},\n",
+       " {'batch_size': 32, 'clayers': 5, 'ker_size': 5},\n",
+       " {'batch_size': 32, 'clayers': 3, 'ker_size': 5},\n",
+       " {'batch_size': 32, 'clayers': 4, 'ker_size': 3},\n",
+       " {'batch_size': 32, 'clayers': 5, 'ker_size': 7},\n",
+       " {'batch_size': 64, 'clayers': 4, 'ker_size': 7},\n",
+       " {'batch_size': 32, 'clayers': 5, 'ker_size': 3},\n",
+       " {'batch_size': 64, 'clayers': 5, 'ker_size': 5},\n",
+       " {'batch_size': 64, 'clayers': 5, 'ker_size': 3},\n",
+       " {'batch_size': 64, 'clayers': 3, 'ker_size': 5},\n",
+       " {'batch_size': 32, 'clayers': 3, 'ker_size': 3},\n",
+       " {'batch_size': 64, 'clayers': 4, 'ker_size': 3},\n",
+       " {'batch_size': 64, 'clayers': 3, 'ker_size': 3}]"
+      ]
+     },
+     "execution_count": 36,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "bestidx = np.argsort(grid_result.cv_results_['mean_test_score'])\n",
+    "print(grid_result.cv_results_['mean_test_score'][bestidx])\n",
+    "parlist = grid_result.cv_results_['params']\n",
+    "runtlist = grid_result.cv_results_['mean_fit_time']\n",
+    "bestlist = [parlist[indx] for indx in bestidx]\n",
+    "runlist = [runtlist[indx]/60.0 for indx in bestidx]\n",
+    "bestlist"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 2 - 0
n2_cvsrch.dat

@@ -0,0 +1,2 @@
+,num_units,MRE(%),Total Params,mean_fit_time
+0,2,5.172808148276762,4768,0
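The unnamed leading column and the header row match pandas' default DataFrame.to_csv output, so this file was presumably written from a print_tuning_results table for a resnet grid over num_units, along the lines of (hypothetical call, not shown in this commit):

htres.to_csv('n2_cvsrch.dat')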

File diff suppressed because it is too large
+ 561 - 61
scatternet.ipynb


+ 109 - 35
scnets.py

@@ -10,8 +10,28 @@ from keras.optimizers import Adam
 import numpy as np
 import matplotlib.pyplot as plt
 from keras.layers import PReLU
+from keras.models import Model
+from keras.layers import Input, Add
+from keras.layers.normalization import BatchNormalization
+
 
 
+#staging area for new models
+def plot_training_history(history, red_factor):
+    loss, val_loss = history.history['loss'], history.history['val_loss']
+    loss = np.asarray(loss)/red_factor
+    val_loss = np.asarray(val_loss)/red_factor
+    epochs = len(loss)
+
+    fig, axs = plt.subplots(1,1, figsize=(5,5))
+    axs.semilogy(np.arange(1, epochs + 1), loss, label='train error')
+    axs.semilogy(np.arange(1, epochs + 1), val_loss, label='validation error')
+    axs.set_xlabel('Epoch number')
+    axs.set_ylabel('Mean Relative Error (MRE) (%)')
+    axs.legend(loc="best")
+    
 #function to test performance on testset
 def calc_mre(y_true, y_pred):
     y_err = 100*np.abs(y_true - y_pred)/y_true
@@ -32,9 +52,44 @@ def relerr_loss(y_true, y_pred):
 
 
 
+def fullycon( in_size=8, 
+             out_size=256, 
+             batch_size=32,
+             N_hidden=3, 
+             N_neurons=250, 
+             N_gpus=1):
+    """
+    Returns a fully-connected model which will take a normalized size vector and return a
+    spectrum
+    in_size: length of the size vector
+    out_size: length of the spectrum vector
+    N_hidden: number of hidden layers
+    N_neurons: number of neurons in each of the hidden layers
+    """
+    model = Sequential()
+    model.add(Dense(N_neurons, input_dim=in_size, 
+                    kernel_initializer='normal', activation='relu',
+                    name='first' ))
+    for h in np.arange(N_hidden):
+        lname = "H"+str(h)
+        model.add(Dense(N_neurons, 
+                        kernel_initializer='normal', activation='relu', name=lname ))
+
+    model.add(Dense(out_size, kernel_initializer='normal', name='last'))
+
+    # Compile model
+    if N_gpus == 1:
+        model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
+    else:
+        gpu_list = ["gpu(%d)" % i for i in range(N_gpus)]
+        model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K], context = gpu_list)
+    return model
+
+
 
 def conv1dmodel(in_size=8, 
         out_size=256,
+        batch_size=32,
         c1_nf=64,
         clayers=2,
         ker_size=3):
@@ -72,6 +127,7 @@ def conv1dmodel(in_size=8,
 
 def convprel(in_size=8, 
         out_size=256,
+        batch_size=32,
         c1_nf=64,
         clayers=2,
         ker_size=3):
@@ -110,51 +166,69 @@ def convprel(in_size=8,
     return model
 
 
+def resblock2(x):
+    # plain two-convolution residual block; the input must already have
+    # 32 channels so the skip connection can be added
+    out = Conv1D(filters=32, kernel_size=3, strides=1, padding='same', 
+            dilation_rate=1, 
+            kernel_initializer='normal')(x)
+    #out = BatchNormalization()(out)
+    out = Activation('relu')(out)
+    out = Conv1D(filters=32, kernel_size=3, strides=1, padding='same', 
+            dilation_rate=1, 
+            kernel_initializer='normal')(out)
+    out = Add()([out, x])
+    return out
+
+
 
+def resblock(x, ker_size, red_dim):
+    # bottleneck residual block: 1x1 conv down to red_dim channels,
+    # ker_size conv, then 1x1 conv back up to the 32-channel input width
+    out = Conv1D(filters=red_dim, kernel_size=1, strides=1, padding='same', 
+            dilation_rate=1, 
+            kernel_initializer='normal')(x)
+    out = BatchNormalization()(out)
+    out = Activation('relu')(out)
+    out = Conv1D(filters=red_dim, kernel_size=ker_size, strides=1, padding='same', 
+            dilation_rate=1, 
+            kernel_initializer='normal')(out)
+    out = BatchNormalization()(out)
+    out = Activation('relu')(out)
+    # expand back to 32 filters so the Add() matches the block input shape
+    out = Conv1D(filters=32, kernel_size=1, strides=1, padding='same', 
+            dilation_rate=1, 
+            kernel_initializer='normal')(out)
+    out = BatchNormalization()(out)
+    out = Add()([out, x])
+    out = PReLU(alpha_initializer='zeros', alpha_regularizer=None)(out)
+    return out
 
+def resnet(in_size=8, 
+        out_size=256,
+        num_units=2,
+        red_dim=8,
+        batch_size=32,
+        ker_size=3):
+    
+    a = Input(shape=(in_size,))
+    # lift the size vector to 256 features, then fold into 8 steps x 32 channels
+    first = Dense(256, kernel_initializer='normal')(a)
+    first = PReLU(alpha_initializer='zeros', alpha_regularizer=None)(first)
+    first = Reshape((8, 32))(first)
+   
+    # stack num_units bottleneck residual blocks
+    for units in np.arange(num_units):
+        first = resblock(first, ker_size, red_dim)
 
+    # flatten 8 x 32 back to a 256-point spectrum (out_size is unused; the
+    # output length is fixed by the Reshape above)
+    last = Flatten()(first)
 
+    model = Model(inputs=a, outputs=last)
 
+    #compile model
+    model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
+
+    return model
 
 
 
-def fullycon( in_size=8, out_size=250, N_hidden=3, N_neurons=250, N_gpus=1):
-    """
-    Returns a fully-connected model which will take a normalized size vector and return a
-    spectrum
-    in_size: length of the size vector
-    out_size: length of the spectrum vector
-    N_hidden: number of hidden layers
-    N_neurons: number of neurons in each of the hidden layers
-    """
-    model = Sequential()
-    model.add(Dense(out_size, input_dim=in_size, kernel_initializer='normal', activation='relu',
-        name='first' ))
-    for h in np.arange(N_hidden):
-        lname = "H"+str(h)
-        model.add(Dense(out_size, kernel_initializer='normal', activation='relu', name=lname ))
 
-    model.add(Dense(out_size, kernel_initializer='normal', name='last'))
 
-    # Compile model
-    if N_gpus == 1:
-        model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K])
-    else:
-        gpu_list = ["gpu(%d)" % i for i in range(N_gpus)]
-        model.compile(loss=relerr_loss, optimizer='adam', metrics=[calc_mre_K], context = gpu_list)
-    return model
 
-#staging area for new models
-def plot_training_history(history, red_factor):
-    loss, val_loss = history.history['loss'], history.history['val_loss']
-    loss = np.asarray(loss)/red_factor
-    val_loss = np.asarray(val_loss)/red_factor
-    epochs = len(loss)
 
-    fig, axs = plt.subplots(1,1, figsize=(5,5))
-    axs.semilogy(np.arange(1, epochs + 1), loss, label='train error')
-    axs.semilogy(np.arange(1, epochs + 1), val_loss, label='validation error')
-    axs.set_xlabel('Epoch number')
-    axs.set_ylabel('Mean Relative Error (MRE) (%)')
-    axs.legend(loc="best")
 

+ 0 - 0
untitled


Some files were not shown because too many files changed in this diff