Browse source code

conv layer nets added and working

Ravi Hegde 6 years ago
parent
commit 9af47cfec1

+ 6 - 0
.ipynb_checkpoints/Untitled1-checkpoint.ipynb

@@ -0,0 +1,6 @@
+{
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 6 - 0
.ipynb_checkpoints/Untitled2-checkpoint.ipynb

@@ -0,0 +1,6 @@
+{
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 6 - 0
.ipynb_checkpoints/Untitled3-checkpoint.ipynb

@@ -0,0 +1,6 @@
+{
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 100533 - 0
.ipynb_checkpoints/Untitled4-checkpoint.ipynb

File diff suppressed because it is too large


+ 1738 - 0
.ipynb_checkpoints/fully_con_hyp_tuning-checkpoint.ipynb

@@ -0,0 +1,1738 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# we will study the hyperparamter tuning of fully connected scatternet\n",
+    "\n",
+    "# first find optimal number of layers and neuron numbers\n",
+    "# second optimize the batch size and number of epochs for the best learned architecture\n"
+   ]
+  },
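
A minimal sketch of the two-stage search described in the cell above, assuming a hypothetical build_model(n_layers, n_neurons) helper and illustrative grid values (the actual search code is not shown in this diff):

import itertools

# Stage 1: pick the architecture with a fixed training budget.
best = None
for n_layers, n_neurons in itertools.product([2, 3, 4], [64, 128, 256]):
    model = build_model(n_layers, n_neurons)
    hist = model.fit(x_train, y_train, validation_split=0.2,
                     epochs=50, batch_size=32, verbose=0)
    score = min(hist.history['val_loss'])
    if best is None or score < best[0]:
        best = (score, n_layers, n_neurons)

# Stage 2: tune batch size (and epochs) for the winning architecture.
for batch_size in [16, 32, 64, 128]:
    model = build_model(best[1], best[2])
    model.fit(x_train, y_train, validation_split=0.2,
              epochs=500, batch_size=batch_size, verbose=0)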
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Loading the dataset here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-01T03:45:35.676510Z",
+     "start_time": "2018-09-01T03:45:35.262232Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
+      "  return f(*args, **kwds)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Dataset has been loaded\n",
+      "x-train (60000, 8)\n",
+      "x-test  (40000, 8)\n",
+      "y-train (60000, 256)\n",
+      "y-test  (40000, 256)\n"
+     ]
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "import h5py\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
+    "#now load this dataset \n",
+    "h5f = h5py.File('./datasets/s8_sio2tio2_v2.h5','r')\n",
+    "X = h5f['sizes'][:]\n",
+    "Y = h5f['spectrum'][:]\n",
+    "\n",
+    "#get the ranges of the loaded data\n",
+    "num_layers = X.shape[1]\n",
+    "num_lpoints = Y.shape[1]\n",
+    "size_max = np.amax(X)\n",
+    "size_min = np.amin(X)\n",
+    "size_av = 0.5*(size_max + size_min)\n",
+    "\n",
+    "#this information is not given in the dataset\n",
+    "lam_min = 300\n",
+    "lam_max = 1200\n",
+    "lams = np.linspace(lam_min, lam_max, num_lpoints)\n",
+    "\n",
+    "#create a train - test split of the dataset\n",
+    "x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.4, random_state=42)\n",
+    "\n",
+    "# normalize inputs \n",
+    "x_train = (x_train - 50)/20 \n",
+    "x_test = (x_test - 50)/20 \n",
+    "\n",
+    "print(\"Dataset has been loaded\")\n",
+    "print(\"x-train\", x_train.shape)\n",
+    "print(\"x-test \", x_test.shape)\n",
+    "print(\"y-train\", y_train.shape)\n",
+    "print(\"y-test \", y_test.shape)"
+   ]
+  },
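
The constants 50 and 20 in the normalization above are presumably the approximate center and half-range of the layer sizes. A sketch of the inverse map, checked against a fresh split with the same random_state (which reproduces the raw sizes):

import numpy as np

# Inverse of the affine normalization applied above (assumed exact).
def denormalize_sizes(x_norm):
    return 20.0 * x_norm + 50.0

# Round-trip check: same seed, same split, so this recovers the raw sizes.
x_raw_train, _, _, _ = train_test_split(X, Y, test_size=0.4, random_state=42)
assert np.allclose(denormalize_sizes(x_train), x_raw_train)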
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### create models here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-01T09:05:40.103777Z",
+     "start_time": "2018-09-01T09:05:39.800179Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/keras/backend/mxnet_backend.py:89: UserWarning: MXNet Backend performs best with `channels_first` format. Using `channels_last` will significantly reduce performance due to the Transpose operations. For performance improvement, please use this API`keras.utils.to_channels_first(x_input)`to transform `channels_last` data to `channels_first` format and also please change the `image_data_format` in `keras.json` to `channels_first`.Note: `x_input` is a Numpy tensor or a list of Numpy tensorRefer to: https://github.com/awslabs/keras-apache-mxnet/tree/master/docs/mxnet_backend/performance_guide.md\n",
+      "  train_symbol = func(*args, **kwargs)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/keras/backend/mxnet_backend.py:92: UserWarning: MXNet Backend performs best with `channels_first` format. Using `channels_last` will significantly reduce performance due to the Transpose operations. For performance improvement, please use this API`keras.utils.to_channels_first(x_input)`to transform `channels_last` data to `channels_first` format and also please change the `image_data_format` in `keras.json` to `channels_first`.Note: `x_input` is a Numpy tensor or a list of Numpy tensorRefer to: https://github.com/awslabs/keras-apache-mxnet/tree/master/docs/mxnet_backend/performance_guide.md\n",
+      "  test_symbol = func(*args, **kwargs)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "_________________________________________________________________\n",
+      "Layer (type)                 Output Shape              Param #   \n",
+      "=================================================================\n",
+      "first (Dense)                (None, 256)               2304      \n",
+      "_________________________________________________________________\n",
+      "activation_58 (Activation)   (None, 256)               0         \n",
+      "_________________________________________________________________\n",
+      "Reshape1 (Reshape)           (None, 4, 64)             0         \n",
+      "_________________________________________________________________\n",
+      "Up1 (UpSampling1D)           (None, 8, 64)             0         \n",
+      "_________________________________________________________________\n",
+      "Conv1 (Conv1D)               (None, 8, 64)             12352     \n",
+      "_________________________________________________________________\n",
+      "activation_59 (Activation)   (None, 8, 64)             0         \n",
+      "_________________________________________________________________\n",
+      "Conv2 (Conv1D)               (None, 8, 32)             6176      \n",
+      "_________________________________________________________________\n",
+      "activation_60 (Activation)   (None, 8, 32)             0         \n",
+      "_________________________________________________________________\n",
+      "flatten_20 (Flatten)         (None, 256)               0         \n",
+      "=================================================================\n",
+      "Total params: 20,832\n",
+      "Trainable params: 20,832\n",
+      "Non-trainable params: 0\n",
+      "_________________________________________________________________\n"
+     ]
+    }
+   ],
+   "source": [
+    "import scnets as scn\n",
+    "from IPython.display import SVG\n",
+    "from keras.utils.vis_utils import model_to_dot\n",
+    "\n",
+    "#define and visualize the model here\n",
+    "#model = scn.fullycon(num_layers, num_lpoints, 4, 500, 2)\n",
+    "\n",
+    "model = scn.conv1dmodel(8, 256, 3)\n",
+    "model.summary()\n",
+    "#SVG(model_to_dot(model).create(prog='dot', format='svg'))\n"
+   ]
+  },
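
The parameter counts in the summary pin down the architecture: 2,304 = 8·256 + 256 for the first Dense layer, and 12,352 = 3·64·64 + 64 and 6,176 = 3·64·32 + 32 for kernel-size-3 Conv1D layers with 'same' padding. scnets is not included in this diff, but scn.conv1dmodel(8, 256, 3) plausibly builds something like the following sketch (activation choices are assumptions):

from keras.models import Sequential
from keras.layers import Dense, Activation, Reshape, UpSampling1D, Conv1D, Flatten

def conv1dmodel_sketch(in_size=8, out_size=256, kernel=3):
    # Hedged reconstruction of scn.conv1dmodel from the summary above.
    model = Sequential()
    model.add(Dense(out_size, input_dim=in_size, name='first'))  # 2,304 params
    model.add(Activation('relu'))                                # assumed activation
    model.add(Reshape((out_size // 64, 64), name='Reshape1'))    # (4, 64)
    model.add(UpSampling1D(size=2, name='Up1'))                  # (8, 64)
    model.add(Conv1D(64, kernel, padding='same', name='Conv1'))  # 12,352 params
    model.add(Activation('relu'))
    model.add(Conv1D(32, kernel, padding='same', name='Conv2'))  # 6,176 params
    model.add(Activation('relu'))
    model.add(Flatten())                                         # (256,) to match y
    return model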
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-01T09:42:14.931907Z",
+     "start_time": "2018-09-01T09:06:05.599209Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Train on 48000 samples, validate on 12000 samples\n",
+      "Epoch 1/500\n",
+      " 2304/48000 [>.............................] - ETA: 3s - loss: 3963.3802 - acc: 0.0065 - calc_mre_K: 48.3811 "
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/mxnet/module/bucketing_module.py:408: UserWarning: Optimizer created manually outside Module but rescale_grad is not normalized to 1.0/batch_size/num_workers (1.0 vs. 0.03125). Is this intended?\n",
+      "  force_init=force_init)\n"
+     ]
+    },
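
Two details worth decoding from these logs: the rescale_grad value in the MXNet warning, 0.03125 = 1/32, indicates the run uses batch_size=32, and the custom metric calc_mre_K is not defined anywhere in this diff. By its name it is presumably a mean relative error implemented with Keras backend ops, roughly along these lines (the real definition lives in scnets, which is not part of this commit):

from keras import backend as K

# Guess at scnets' calc_mre_K (mean relative error); an assumption,
# since scnets.py is not included in this commit.
def calc_mre_K(y_true, y_pred):
    return K.mean(K.abs(y_pred - y_true) / K.abs(y_true))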
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "48000/48000 [==============================] - 4s 90us/step - loss: 706.1956 - acc: 0.0685 - calc_mre_K: 8.6206 - val_loss: 287.1653 - val_acc: 0.1354 - val_calc_mre_K: 3.5054\n",
+      "Epoch 2/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 240.4728 - acc: 0.2206 - calc_mre_K: 2.9355 - val_loss: 235.9114 - val_acc: 0.2408 - val_calc_mre_K: 2.8798\n",
+      "Epoch 3/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 199.6521 - acc: 0.2823 - calc_mre_K: 2.4372 - val_loss: 207.5411 - val_acc: 0.3072 - val_calc_mre_K: 2.5335\n",
+      "Epoch 4/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 183.1636 - acc: 0.3334 - calc_mre_K: 2.2359 - val_loss: 175.4547 - val_acc: 0.3445 - val_calc_mre_K: 2.1418\n",
+      "Epoch 5/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 173.5258 - acc: 0.3634 - calc_mre_K: 2.1182 - val_loss: 166.9731 - val_acc: 0.3980 - val_calc_mre_K: 2.0382\n",
+      "Epoch 6/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 165.3657 - acc: 0.3800 - calc_mre_K: 2.0186 - val_loss: 169.2977 - val_acc: 0.3767 - val_calc_mre_K: 2.0666\n",
+      "Epoch 7/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 158.7317 - acc: 0.3904 - calc_mre_K: 1.9376 - val_loss: 152.2851 - val_acc: 0.3932 - val_calc_mre_K: 1.8589\n",
+      "Epoch 8/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 153.6869 - acc: 0.4007 - calc_mre_K: 1.8761 - val_loss: 151.0466 - val_acc: 0.3727 - val_calc_mre_K: 1.8438\n",
+      "Epoch 9/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 148.5147 - acc: 0.4170 - calc_mre_K: 1.8129 - val_loss: 156.3614 - val_acc: 0.4127 - val_calc_mre_K: 1.9087\n",
+      "Epoch 10/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 142.9498 - acc: 0.4267 - calc_mre_K: 1.7450 - val_loss: 138.8417 - val_acc: 0.4441 - val_calc_mre_K: 1.6948\n",
+      "Epoch 11/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 141.3909 - acc: 0.4405 - calc_mre_K: 1.7260 - val_loss: 153.6785 - val_acc: 0.4201 - val_calc_mre_K: 1.8760\n",
+      "Epoch 12/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 137.8279 - acc: 0.4526 - calc_mre_K: 1.6825 - val_loss: 132.2860 - val_acc: 0.4448 - val_calc_mre_K: 1.6148\n",
+      "Epoch 13/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 135.4299 - acc: 0.4565 - calc_mre_K: 1.6532 - val_loss: 145.2083 - val_acc: 0.4837 - val_calc_mre_K: 1.7726\n",
+      "Epoch 14/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 133.2266 - acc: 0.4705 - calc_mre_K: 1.6263 - val_loss: 125.5341 - val_acc: 0.4547 - val_calc_mre_K: 1.5324\n",
+      "Epoch 15/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 131.7341 - acc: 0.4744 - calc_mre_K: 1.6081 - val_loss: 133.1067 - val_acc: 0.4829 - val_calc_mre_K: 1.6248\n",
+      "Epoch 16/500\n",
+      "48000/48000 [==============================] - 5s 100us/step - loss: 129.5008 - acc: 0.4760 - calc_mre_K: 1.5808 - val_loss: 125.6019 - val_acc: 0.4654 - val_calc_mre_K: 1.5332\n",
+      "Epoch 17/500\n",
+      "48000/48000 [==============================] - 5s 110us/step - loss: 128.7901 - acc: 0.4797 - calc_mre_K: 1.5721 - val_loss: 129.3205 - val_acc: 0.4972 - val_calc_mre_K: 1.5786\n",
+      "Epoch 18/500\n",
+      "48000/48000 [==============================] - 5s 104us/step - loss: 126.4674 - acc: 0.4828 - calc_mre_K: 1.5438 - val_loss: 128.3473 - val_acc: 0.4984 - val_calc_mre_K: 1.5667\n",
+      "Epoch 19/500\n",
+      "48000/48000 [==============================] - 5s 109us/step - loss: 125.0717 - acc: 0.4832 - calc_mre_K: 1.5268 - val_loss: 124.0958 - val_acc: 0.4912 - val_calc_mre_K: 1.5148\n",
+      "Epoch 20/500\n",
+      "48000/48000 [==============================] - 5s 101us/step - loss: 123.4301 - acc: 0.4813 - calc_mre_K: 1.5067 - val_loss: 125.2088 - val_acc: 0.5084 - val_calc_mre_K: 1.5284\n",
+      "Epoch 21/500\n",
+      "48000/48000 [==============================] - 5s 105us/step - loss: 122.2793 - acc: 0.4835 - calc_mre_K: 1.4927 - val_loss: 122.3527 - val_acc: 0.4926 - val_calc_mre_K: 1.4936\n",
+      "Epoch 22/500\n",
+      "48000/48000 [==============================] - 5s 100us/step - loss: 120.6167 - acc: 0.4830 - calc_mre_K: 1.4724 - val_loss: 117.5707 - val_acc: 0.4974 - val_calc_mre_K: 1.4352\n",
+      "Epoch 23/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 119.5484 - acc: 0.4808 - calc_mre_K: 1.4593 - val_loss: 117.0459 - val_acc: 0.4860 - val_calc_mre_K: 1.4288\n",
+      "Epoch 24/500\n",
+      "48000/48000 [==============================] - 5s 103us/step - loss: 117.6612 - acc: 0.4781 - calc_mre_K: 1.4363 - val_loss: 114.1131 - val_acc: 0.5009 - val_calc_mre_K: 1.3930\n",
+      "Epoch 25/500\n",
+      "48000/48000 [==============================] - 5s 99us/step - loss: 116.8395 - acc: 0.4765 - calc_mre_K: 1.4263 - val_loss: 116.4501 - val_acc: 0.4639 - val_calc_mre_K: 1.4215\n",
+      "Epoch 26/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 115.7186 - acc: 0.4755 - calc_mre_K: 1.4126 - val_loss: 113.8242 - val_acc: 0.4517 - val_calc_mre_K: 1.3895\n",
+      "Epoch 27/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 115.9016 - acc: 0.4782 - calc_mre_K: 1.4148 - val_loss: 124.8452 - val_acc: 0.4783 - val_calc_mre_K: 1.5240\n",
+      "Epoch 28/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 113.6096 - acc: 0.4775 - calc_mre_K: 1.3868 - val_loss: 109.4969 - val_acc: 0.4976 - val_calc_mre_K: 1.3366\n",
+      "Epoch 29/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 112.7697 - acc: 0.4718 - calc_mre_K: 1.3766 - val_loss: 124.7398 - val_acc: 0.4655 - val_calc_mre_K: 1.5227\n",
+      "Epoch 30/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 112.1795 - acc: 0.4730 - calc_mre_K: 1.3694 - val_loss: 107.9709 - val_acc: 0.4783 - val_calc_mre_K: 1.3180\n",
+      "Epoch 31/500\n",
+      "48000/48000 [==============================] - 4s 79us/step - loss: 111.0483 - acc: 0.4719 - calc_mre_K: 1.3556 - val_loss: 109.7872 - val_acc: 0.4618 - val_calc_mre_K: 1.3402\n",
+      "Epoch 32/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 109.8041 - acc: 0.4705 - calc_mre_K: 1.3404 - val_loss: 127.1827 - val_acc: 0.4750 - val_calc_mre_K: 1.5525\n",
+      "Epoch 33/500\n",
+      "48000/48000 [==============================] - 4s 81us/step - loss: 109.8176 - acc: 0.4748 - calc_mre_K: 1.3405 - val_loss: 110.4792 - val_acc: 0.4017 - val_calc_mre_K: 1.3486\n",
+      "Epoch 34/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 108.5102 - acc: 0.4687 - calc_mre_K: 1.3246 - val_loss: 106.3191 - val_acc: 0.4547 - val_calc_mre_K: 1.2978\n",
+      "Epoch 35/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 108.2114 - acc: 0.4714 - calc_mre_K: 1.3209 - val_loss: 102.4241 - val_acc: 0.4495 - val_calc_mre_K: 1.2503\n",
+      "Epoch 36/500\n",
+      "48000/48000 [==============================] - 5s 98us/step - loss: 107.3119 - acc: 0.4657 - calc_mre_K: 1.3100 - val_loss: 107.5346 - val_acc: 0.4368 - val_calc_mre_K: 1.3127\n",
+      "Epoch 37/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 106.2434 - acc: 0.4693 - calc_mre_K: 1.2969 - val_loss: 111.2277 - val_acc: 0.4447 - val_calc_mre_K: 1.3578\n",
+      "Epoch 38/500\n",
+      "48000/48000 [==============================] - 5s 97us/step - loss: 105.1790 - acc: 0.4652 - calc_mre_K: 1.2839 - val_loss: 101.7198 - val_acc: 0.4635 - val_calc_mre_K: 1.2417\n",
+      "Epoch 39/500\n",
+      "48000/48000 [==============================] - 5s 100us/step - loss: 105.4222 - acc: 0.4650 - calc_mre_K: 1.2869 - val_loss: 104.2707 - val_acc: 0.4477 - val_calc_mre_K: 1.2728\n",
+      "Epoch 40/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 104.1228 - acc: 0.4640 - calc_mre_K: 1.2710 - val_loss: 104.9903 - val_acc: 0.4891 - val_calc_mre_K: 1.2816\n",
+      "Epoch 41/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 104.2051 - acc: 0.4632 - calc_mre_K: 1.2720 - val_loss: 100.7008 - val_acc: 0.4533 - val_calc_mre_K: 1.2293\n",
+      "Epoch 42/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 102.5908 - acc: 0.4636 - calc_mre_K: 1.2523 - val_loss: 114.1402 - val_acc: 0.4462 - val_calc_mre_K: 1.3933\n",
+      "Epoch 43/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 102.4765 - acc: 0.4628 - calc_mre_K: 1.2509 - val_loss: 101.9461 - val_acc: 0.4576 - val_calc_mre_K: 1.2445\n",
+      "Epoch 44/500\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "48000/48000 [==============================] - 4s 92us/step - loss: 101.9847 - acc: 0.4641 - calc_mre_K: 1.2449 - val_loss: 98.6102 - val_acc: 0.4888 - val_calc_mre_K: 1.2037\n",
+      "Epoch 45/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 102.1112 - acc: 0.4599 - calc_mre_K: 1.2465 - val_loss: 105.3515 - val_acc: 0.4853 - val_calc_mre_K: 1.2860\n",
+      "Epoch 46/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 101.5537 - acc: 0.4636 - calc_mre_K: 1.2397 - val_loss: 97.7168 - val_acc: 0.4669 - val_calc_mre_K: 1.1928\n",
+      "Epoch 47/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 100.2124 - acc: 0.4625 - calc_mre_K: 1.2233 - val_loss: 97.1910 - val_acc: 0.4419 - val_calc_mre_K: 1.1864\n",
+      "Epoch 48/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 100.3890 - acc: 0.4642 - calc_mre_K: 1.2255 - val_loss: 97.9009 - val_acc: 0.4770 - val_calc_mre_K: 1.1951\n",
+      "Epoch 49/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 98.9376 - acc: 0.4678 - calc_mre_K: 1.2077 - val_loss: 99.2925 - val_acc: 0.4730 - val_calc_mre_K: 1.2121\n",
+      "Epoch 50/500\n",
+      "48000/48000 [==============================] - 5s 96us/step - loss: 99.1543 - acc: 0.4699 - calc_mre_K: 1.2104 - val_loss: 111.5877 - val_acc: 0.4945 - val_calc_mre_K: 1.3622\n",
+      "Epoch 51/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 98.5129 - acc: 0.4685 - calc_mre_K: 1.2025 - val_loss: 97.9373 - val_acc: 0.4536 - val_calc_mre_K: 1.1955\n",
+      "Epoch 52/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 97.7274 - acc: 0.4674 - calc_mre_K: 1.1930 - val_loss: 104.5049 - val_acc: 0.4272 - val_calc_mre_K: 1.2757\n",
+      "Epoch 53/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 97.3176 - acc: 0.4714 - calc_mre_K: 1.1880 - val_loss: 104.9285 - val_acc: 0.4561 - val_calc_mre_K: 1.2809\n",
+      "Epoch 54/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 96.4600 - acc: 0.4714 - calc_mre_K: 1.1775 - val_loss: 93.5698 - val_acc: 0.4842 - val_calc_mre_K: 1.1422\n",
+      "Epoch 55/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 96.2612 - acc: 0.4695 - calc_mre_K: 1.1751 - val_loss: 93.8526 - val_acc: 0.4656 - val_calc_mre_K: 1.1457\n",
+      "Epoch 56/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 96.6569 - acc: 0.4750 - calc_mre_K: 1.1799 - val_loss: 91.3333 - val_acc: 0.4988 - val_calc_mre_K: 1.1149\n",
+      "Epoch 57/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 95.4529 - acc: 0.4735 - calc_mre_K: 1.1652 - val_loss: 91.2335 - val_acc: 0.4964 - val_calc_mre_K: 1.1137\n",
+      "Epoch 58/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 94.9263 - acc: 0.4729 - calc_mre_K: 1.1588 - val_loss: 98.6745 - val_acc: 0.4805 - val_calc_mre_K: 1.2045\n",
+      "Epoch 59/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 95.4020 - acc: 0.4734 - calc_mre_K: 1.1646 - val_loss: 92.4611 - val_acc: 0.4903 - val_calc_mre_K: 1.1287\n",
+      "Epoch 60/500\n",
+      "48000/48000 [==============================] - 5s 101us/step - loss: 93.7607 - acc: 0.4754 - calc_mre_K: 1.1445 - val_loss: 92.4899 - val_acc: 0.4502 - val_calc_mre_K: 1.1290\n",
+      "Epoch 61/500\n",
+      "48000/48000 [==============================] - 5s 101us/step - loss: 93.7852 - acc: 0.4763 - calc_mre_K: 1.1448 - val_loss: 92.7147 - val_acc: 0.4889 - val_calc_mre_K: 1.1318\n",
+      "Epoch 62/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 93.8594 - acc: 0.4758 - calc_mre_K: 1.1457 - val_loss: 93.7416 - val_acc: 0.4839 - val_calc_mre_K: 1.1443\n",
+      "Epoch 63/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 92.7651 - acc: 0.4802 - calc_mre_K: 1.1324 - val_loss: 92.3564 - val_acc: 0.4756 - val_calc_mre_K: 1.1274\n",
+      "Epoch 64/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 93.4941 - acc: 0.4745 - calc_mre_K: 1.1413 - val_loss: 95.4468 - val_acc: 0.4637 - val_calc_mre_K: 1.1651\n",
+      "Epoch 65/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 92.9577 - acc: 0.4811 - calc_mre_K: 1.1347 - val_loss: 94.1396 - val_acc: 0.4566 - val_calc_mre_K: 1.1492\n",
+      "Epoch 66/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 92.2031 - acc: 0.4801 - calc_mre_K: 1.1255 - val_loss: 94.2149 - val_acc: 0.5150 - val_calc_mre_K: 1.1501\n",
+      "Epoch 67/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 91.7885 - acc: 0.4825 - calc_mre_K: 1.1205 - val_loss: 89.0208 - val_acc: 0.5012 - val_calc_mre_K: 1.0867\n",
+      "Epoch 68/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 90.8483 - acc: 0.4850 - calc_mre_K: 1.1090 - val_loss: 89.1484 - val_acc: 0.4612 - val_calc_mre_K: 1.0882\n",
+      "Epoch 69/500\n",
+      "48000/48000 [==============================] - 5s 98us/step - loss: 90.8245 - acc: 0.4828 - calc_mre_K: 1.1087 - val_loss: 93.2117 - val_acc: 0.4774 - val_calc_mre_K: 1.1378\n",
+      "Epoch 70/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 90.8614 - acc: 0.4807 - calc_mre_K: 1.1091 - val_loss: 88.4758 - val_acc: 0.5062 - val_calc_mre_K: 1.0800\n",
+      "Epoch 71/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 90.0377 - acc: 0.4866 - calc_mre_K: 1.0991 - val_loss: 90.7641 - val_acc: 0.4643 - val_calc_mre_K: 1.1080\n",
+      "Epoch 72/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 90.6533 - acc: 0.4826 - calc_mre_K: 1.1066 - val_loss: 88.6626 - val_acc: 0.4963 - val_calc_mre_K: 1.0823\n",
+      "Epoch 73/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 89.9863 - acc: 0.4832 - calc_mre_K: 1.0985 - val_loss: 93.9717 - val_acc: 0.4691 - val_calc_mre_K: 1.1471\n",
+      "Epoch 74/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 89.6262 - acc: 0.4888 - calc_mre_K: 1.0941 - val_loss: 91.6102 - val_acc: 0.4873 - val_calc_mre_K: 1.1183\n",
+      "Epoch 75/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 89.2185 - acc: 0.4872 - calc_mre_K: 1.0891 - val_loss: 92.9164 - val_acc: 0.4732 - val_calc_mre_K: 1.1342\n",
+      "Epoch 76/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 88.7742 - acc: 0.4863 - calc_mre_K: 1.0837 - val_loss: 87.5795 - val_acc: 0.5152 - val_calc_mre_K: 1.0691\n",
+      "Epoch 77/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 88.8478 - acc: 0.4891 - calc_mre_K: 1.0846 - val_loss: 86.6572 - val_acc: 0.4680 - val_calc_mre_K: 1.0578\n",
+      "Epoch 78/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 87.8198 - acc: 0.4921 - calc_mre_K: 1.0720 - val_loss: 86.5042 - val_acc: 0.4897 - val_calc_mre_K: 1.0560\n",
+      "Epoch 79/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 87.9323 - acc: 0.4909 - calc_mre_K: 1.0734 - val_loss: 88.7598 - val_acc: 0.4683 - val_calc_mre_K: 1.0835\n",
+      "Epoch 80/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 87.4998 - acc: 0.4884 - calc_mre_K: 1.0681 - val_loss: 86.5280 - val_acc: 0.4665 - val_calc_mre_K: 1.0563\n",
+      "Epoch 81/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 87.2416 - acc: 0.4909 - calc_mre_K: 1.0650 - val_loss: 85.1910 - val_acc: 0.4665 - val_calc_mre_K: 1.0399\n",
+      "Epoch 82/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 87.7128 - acc: 0.4917 - calc_mre_K: 1.0707 - val_loss: 97.6387 - val_acc: 0.4613 - val_calc_mre_K: 1.1919\n",
+      "Epoch 83/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 86.8432 - acc: 0.4904 - calc_mre_K: 1.0601 - val_loss: 87.5226 - val_acc: 0.4528 - val_calc_mre_K: 1.0684\n",
+      "Epoch 84/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 87.1789 - acc: 0.4944 - calc_mre_K: 1.0642 - val_loss: 89.1524 - val_acc: 0.4833 - val_calc_mre_K: 1.0883\n",
+      "Epoch 85/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 86.4981 - acc: 0.4958 - calc_mre_K: 1.0559 - val_loss: 86.8628 - val_acc: 0.4718 - val_calc_mre_K: 1.0603\n",
+      "Epoch 86/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 86.1544 - acc: 0.4932 - calc_mre_K: 1.0517 - val_loss: 87.4378 - val_acc: 0.4799 - val_calc_mre_K: 1.0674\n",
+      "Epoch 87/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 85.9592 - acc: 0.4954 - calc_mre_K: 1.0493 - val_loss: 84.3522 - val_acc: 0.4893 - val_calc_mre_K: 1.0297\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 88/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 86.1989 - acc: 0.4939 - calc_mre_K: 1.0522 - val_loss: 84.0063 - val_acc: 0.4600 - val_calc_mre_K: 1.0255\n",
+      "Epoch 89/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 85.4407 - acc: 0.4970 - calc_mre_K: 1.0430 - val_loss: 85.9898 - val_acc: 0.4267 - val_calc_mre_K: 1.0497\n",
+      "Epoch 90/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 85.4680 - acc: 0.4950 - calc_mre_K: 1.0433 - val_loss: 90.5349 - val_acc: 0.4725 - val_calc_mre_K: 1.1052\n",
+      "Epoch 91/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 85.0812 - acc: 0.4954 - calc_mre_K: 1.0386 - val_loss: 85.1947 - val_acc: 0.4964 - val_calc_mre_K: 1.0400\n",
+      "Epoch 92/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 84.6561 - acc: 0.4926 - calc_mre_K: 1.0334 - val_loss: 84.5826 - val_acc: 0.4935 - val_calc_mre_K: 1.0325\n",
+      "Epoch 93/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 84.9177 - acc: 0.4933 - calc_mre_K: 1.0366 - val_loss: 84.1902 - val_acc: 0.5288 - val_calc_mre_K: 1.0277\n",
+      "Epoch 94/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 84.4291 - acc: 0.4974 - calc_mre_K: 1.0306 - val_loss: 82.7314 - val_acc: 0.4773 - val_calc_mre_K: 1.0099\n",
+      "Epoch 95/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 84.6077 - acc: 0.4974 - calc_mre_K: 1.0328 - val_loss: 83.2056 - val_acc: 0.5130 - val_calc_mre_K: 1.0157\n",
+      "Epoch 96/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 84.2808 - acc: 0.4991 - calc_mre_K: 1.0288 - val_loss: 83.5173 - val_acc: 0.4991 - val_calc_mre_K: 1.0195\n",
+      "Epoch 97/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 83.7713 - acc: 0.4975 - calc_mre_K: 1.0226 - val_loss: 81.9207 - val_acc: 0.5178 - val_calc_mre_K: 1.0000\n",
+      "Epoch 98/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 83.5460 - acc: 0.4953 - calc_mre_K: 1.0198 - val_loss: 87.2668 - val_acc: 0.4433 - val_calc_mre_K: 1.0653\n",
+      "Epoch 99/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 83.4941 - acc: 0.4982 - calc_mre_K: 1.0192 - val_loss: 81.3419 - val_acc: 0.4815 - val_calc_mre_K: 0.9929\n",
+      "Epoch 100/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 83.0367 - acc: 0.5005 - calc_mre_K: 1.0136 - val_loss: 82.3663 - val_acc: 0.5061 - val_calc_mre_K: 1.0054\n",
+      "Epoch 101/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 82.6949 - acc: 0.4970 - calc_mre_K: 1.0095 - val_loss: 82.5149 - val_acc: 0.4731 - val_calc_mre_K: 1.0073\n",
+      "Epoch 102/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 83.0349 - acc: 0.4985 - calc_mre_K: 1.0136 - val_loss: 83.7778 - val_acc: 0.4523 - val_calc_mre_K: 1.0227\n",
+      "Epoch 103/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 82.3564 - acc: 0.4997 - calc_mre_K: 1.0053 - val_loss: 81.3470 - val_acc: 0.5047 - val_calc_mre_K: 0.9930\n",
+      "Epoch 104/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 82.8676 - acc: 0.5030 - calc_mre_K: 1.0116 - val_loss: 83.8691 - val_acc: 0.4649 - val_calc_mre_K: 1.0238\n",
+      "Epoch 105/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 82.7472 - acc: 0.5011 - calc_mre_K: 1.0101 - val_loss: 83.6710 - val_acc: 0.4633 - val_calc_mre_K: 1.0214\n",
+      "Epoch 106/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 82.0327 - acc: 0.5025 - calc_mre_K: 1.0014 - val_loss: 80.8363 - val_acc: 0.4873 - val_calc_mre_K: 0.9868\n",
+      "Epoch 107/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 81.5267 - acc: 0.5059 - calc_mre_K: 0.9952 - val_loss: 80.4791 - val_acc: 0.5057 - val_calc_mre_K: 0.9824\n",
+      "Epoch 108/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 82.3028 - acc: 0.5027 - calc_mre_K: 1.0047 - val_loss: 81.0917 - val_acc: 0.4914 - val_calc_mre_K: 0.9899\n",
+      "Epoch 109/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 81.5495 - acc: 0.5041 - calc_mre_K: 0.9955 - val_loss: 84.7828 - val_acc: 0.4593 - val_calc_mre_K: 1.0349\n",
+      "Epoch 110/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 81.0776 - acc: 0.5062 - calc_mre_K: 0.9897 - val_loss: 77.6384 - val_acc: 0.5337 - val_calc_mre_K: 0.9477\n",
+      "Epoch 111/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 81.6869 - acc: 0.5032 - calc_mre_K: 0.9972 - val_loss: 86.5012 - val_acc: 0.4862 - val_calc_mre_K: 1.0559\n",
+      "Epoch 112/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 81.0899 - acc: 0.5086 - calc_mre_K: 0.9899 - val_loss: 91.9461 - val_acc: 0.4458 - val_calc_mre_K: 1.1224\n",
+      "Epoch 113/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 80.8484 - acc: 0.5070 - calc_mre_K: 0.9869 - val_loss: 81.4088 - val_acc: 0.5270 - val_calc_mre_K: 0.9938\n",
+      "Epoch 114/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 80.9897 - acc: 0.5051 - calc_mre_K: 0.9886 - val_loss: 78.0251 - val_acc: 0.5269 - val_calc_mre_K: 0.9525\n",
+      "Epoch 115/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 80.8859 - acc: 0.5055 - calc_mre_K: 0.9874 - val_loss: 80.4380 - val_acc: 0.4993 - val_calc_mre_K: 0.9819\n",
+      "Epoch 116/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 80.1903 - acc: 0.5121 - calc_mre_K: 0.9789 - val_loss: 80.3441 - val_acc: 0.5321 - val_calc_mre_K: 0.9808\n",
+      "Epoch 117/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 80.8213 - acc: 0.5062 - calc_mre_K: 0.9866 - val_loss: 79.1370 - val_acc: 0.5138 - val_calc_mre_K: 0.9660\n",
+      "Epoch 118/500\n",
+      "48000/48000 [==============================] - 5s 96us/step - loss: 79.8171 - acc: 0.5041 - calc_mre_K: 0.9743 - val_loss: 81.6854 - val_acc: 0.5162 - val_calc_mre_K: 0.9971\n",
+      "Epoch 119/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 79.8396 - acc: 0.5121 - calc_mre_K: 0.9746 - val_loss: 77.7102 - val_acc: 0.5123 - val_calc_mre_K: 0.9486\n",
+      "Epoch 120/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 79.3939 - acc: 0.5090 - calc_mre_K: 0.9692 - val_loss: 79.5331 - val_acc: 0.5104 - val_calc_mre_K: 0.9709\n",
+      "Epoch 121/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 79.7434 - acc: 0.5062 - calc_mre_K: 0.9734 - val_loss: 85.9368 - val_acc: 0.4838 - val_calc_mre_K: 1.0490\n",
+      "Epoch 122/500\n",
+      "48000/48000 [==============================] - 4s 82us/step - loss: 79.2858 - acc: 0.5116 - calc_mre_K: 0.9678 - val_loss: 77.6053 - val_acc: 0.4823 - val_calc_mre_K: 0.9473\n",
+      "Epoch 123/500\n",
+      "48000/48000 [==============================] - 4s 82us/step - loss: 78.9654 - acc: 0.5112 - calc_mre_K: 0.9639 - val_loss: 76.2910 - val_acc: 0.5242 - val_calc_mre_K: 0.9313\n",
+      "Epoch 124/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 79.1110 - acc: 0.5090 - calc_mre_K: 0.9657 - val_loss: 82.0711 - val_acc: 0.4926 - val_calc_mre_K: 1.0018\n",
+      "Epoch 125/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 79.5170 - acc: 0.5056 - calc_mre_K: 0.9707 - val_loss: 78.1056 - val_acc: 0.4849 - val_calc_mre_K: 0.9534\n",
+      "Epoch 126/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 78.7716 - acc: 0.5074 - calc_mre_K: 0.9616 - val_loss: 83.2577 - val_acc: 0.5077 - val_calc_mre_K: 1.0163\n",
+      "Epoch 127/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 78.8280 - acc: 0.5127 - calc_mre_K: 0.9623 - val_loss: 77.7970 - val_acc: 0.5302 - val_calc_mre_K: 0.9497\n",
+      "Epoch 128/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 78.5491 - acc: 0.5069 - calc_mre_K: 0.9589 - val_loss: 77.7517 - val_acc: 0.5208 - val_calc_mre_K: 0.9491\n",
+      "Epoch 129/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 78.6723 - acc: 0.5117 - calc_mre_K: 0.9604 - val_loss: 78.0226 - val_acc: 0.4991 - val_calc_mre_K: 0.9524\n",
+      "Epoch 130/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 78.1994 - acc: 0.5143 - calc_mre_K: 0.9546 - val_loss: 77.3271 - val_acc: 0.4947 - val_calc_mre_K: 0.9439\n",
+      "Epoch 131/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 78.2468 - acc: 0.5134 - calc_mre_K: 0.9552 - val_loss: 81.5466 - val_acc: 0.5151 - val_calc_mre_K: 0.9954\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 132/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 78.4453 - acc: 0.5131 - calc_mre_K: 0.9576 - val_loss: 83.8662 - val_acc: 0.4527 - val_calc_mre_K: 1.0238\n",
+      "Epoch 133/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 78.0065 - acc: 0.5138 - calc_mre_K: 0.9522 - val_loss: 75.2619 - val_acc: 0.4985 - val_calc_mre_K: 0.9187\n",
+      "Epoch 134/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 77.8759 - acc: 0.5075 - calc_mre_K: 0.9506 - val_loss: 78.7407 - val_acc: 0.4856 - val_calc_mre_K: 0.9612\n",
+      "Epoch 135/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 77.9598 - acc: 0.5106 - calc_mre_K: 0.9517 - val_loss: 76.2512 - val_acc: 0.4908 - val_calc_mre_K: 0.9308\n",
+      "Epoch 136/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 77.2496 - acc: 0.5143 - calc_mre_K: 0.9430 - val_loss: 75.7200 - val_acc: 0.5052 - val_calc_mre_K: 0.9243\n",
+      "Epoch 137/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 77.0444 - acc: 0.5080 - calc_mre_K: 0.9405 - val_loss: 76.9845 - val_acc: 0.5352 - val_calc_mre_K: 0.9398\n",
+      "Epoch 138/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 77.6715 - acc: 0.5154 - calc_mre_K: 0.9481 - val_loss: 76.4670 - val_acc: 0.4968 - val_calc_mre_K: 0.9334\n",
+      "Epoch 139/500\n",
+      "48000/48000 [==============================] - 5s 97us/step - loss: 76.8383 - acc: 0.5097 - calc_mre_K: 0.9380 - val_loss: 77.1099 - val_acc: 0.5011 - val_calc_mre_K: 0.9413\n",
+      "Epoch 140/500\n",
+      "48000/48000 [==============================] - 5s 100us/step - loss: 77.0593 - acc: 0.5086 - calc_mre_K: 0.9407 - val_loss: 75.4828 - val_acc: 0.4769 - val_calc_mre_K: 0.9214\n",
+      "Epoch 141/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 76.7813 - acc: 0.5134 - calc_mre_K: 0.9373 - val_loss: 79.6155 - val_acc: 0.4964 - val_calc_mre_K: 0.9719\n",
+      "Epoch 142/500\n",
+      "48000/48000 [==============================] - 5s 97us/step - loss: 76.8563 - acc: 0.5145 - calc_mre_K: 0.9382 - val_loss: 76.3770 - val_acc: 0.5125 - val_calc_mre_K: 0.9323\n",
+      "Epoch 143/500\n",
+      "48000/48000 [==============================] - 5s 97us/step - loss: 76.9136 - acc: 0.5122 - calc_mre_K: 0.9389 - val_loss: 80.2976 - val_acc: 0.5121 - val_calc_mre_K: 0.9802\n",
+      "Epoch 144/500\n",
+      "48000/48000 [==============================] - 5s 98us/step - loss: 76.7159 - acc: 0.5127 - calc_mre_K: 0.9365 - val_loss: 76.2975 - val_acc: 0.4927 - val_calc_mre_K: 0.9314\n",
+      "Epoch 145/500\n",
+      "48000/48000 [==============================] - 5s 101us/step - loss: 76.4500 - acc: 0.5094 - calc_mre_K: 0.9332 - val_loss: 75.1129 - val_acc: 0.5352 - val_calc_mre_K: 0.9169\n",
+      "Epoch 146/500\n",
+      "48000/48000 [==============================] - 5s 96us/step - loss: 76.5047 - acc: 0.5105 - calc_mre_K: 0.9339 - val_loss: 78.7398 - val_acc: 0.4858 - val_calc_mre_K: 0.9612\n",
+      "Epoch 147/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 76.2469 - acc: 0.5101 - calc_mre_K: 0.9307 - val_loss: 75.7282 - val_acc: 0.4959 - val_calc_mre_K: 0.9244\n",
+      "Epoch 148/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 76.3410 - acc: 0.5126 - calc_mre_K: 0.9319 - val_loss: 74.0780 - val_acc: 0.4619 - val_calc_mre_K: 0.9043\n",
+      "Epoch 149/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 75.6485 - acc: 0.5088 - calc_mre_K: 0.9234 - val_loss: 74.6747 - val_acc: 0.4980 - val_calc_mre_K: 0.9116\n",
+      "Epoch 150/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 76.3538 - acc: 0.5058 - calc_mre_K: 0.9321 - val_loss: 74.6296 - val_acc: 0.5011 - val_calc_mre_K: 0.9110\n",
+      "Epoch 151/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 75.7378 - acc: 0.5128 - calc_mre_K: 0.9245 - val_loss: 74.8925 - val_acc: 0.5190 - val_calc_mre_K: 0.9142\n",
+      "Epoch 152/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 75.7434 - acc: 0.5134 - calc_mre_K: 0.9246 - val_loss: 74.4280 - val_acc: 0.4978 - val_calc_mre_K: 0.9085\n",
+      "Epoch 153/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 75.6181 - acc: 0.5081 - calc_mre_K: 0.9231 - val_loss: 73.3659 - val_acc: 0.5122 - val_calc_mre_K: 0.8956\n",
+      "Epoch 154/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 75.5990 - acc: 0.5119 - calc_mre_K: 0.9228 - val_loss: 76.7089 - val_acc: 0.5326 - val_calc_mre_K: 0.9364\n",
+      "Epoch 155/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 75.7128 - acc: 0.5091 - calc_mre_K: 0.9242 - val_loss: 74.7020 - val_acc: 0.4720 - val_calc_mre_K: 0.9119\n",
+      "Epoch 156/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 75.0015 - acc: 0.5137 - calc_mre_K: 0.9155 - val_loss: 74.4258 - val_acc: 0.5132 - val_calc_mre_K: 0.9085\n",
+      "Epoch 157/500\n",
+      "48000/48000 [==============================] - 5s 106us/step - loss: 75.5447 - acc: 0.5089 - calc_mre_K: 0.9222 - val_loss: 75.9239 - val_acc: 0.4629 - val_calc_mre_K: 0.9268\n",
+      "Epoch 158/500\n",
+      "48000/48000 [==============================] - 5s 102us/step - loss: 74.9934 - acc: 0.5080 - calc_mre_K: 0.9154 - val_loss: 83.9992 - val_acc: 0.4866 - val_calc_mre_K: 1.0254\n",
+      "Epoch 159/500\n",
+      "48000/48000 [==============================] - 5s 98us/step - loss: 75.1119 - acc: 0.5059 - calc_mre_K: 0.9169 - val_loss: 76.1479 - val_acc: 0.4611 - val_calc_mre_K: 0.9295\n",
+      "Epoch 160/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 75.2263 - acc: 0.5082 - calc_mre_K: 0.9183 - val_loss: 75.1507 - val_acc: 0.4898 - val_calc_mre_K: 0.9174\n",
+      "Epoch 161/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 74.9651 - acc: 0.5117 - calc_mre_K: 0.9151 - val_loss: 77.6784 - val_acc: 0.5137 - val_calc_mre_K: 0.9482\n",
+      "Epoch 162/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 75.0516 - acc: 0.5088 - calc_mre_K: 0.9162 - val_loss: 73.1820 - val_acc: 0.4928 - val_calc_mre_K: 0.8933\n",
+      "Epoch 163/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 74.7400 - acc: 0.5127 - calc_mre_K: 0.9124 - val_loss: 72.9212 - val_acc: 0.4820 - val_calc_mre_K: 0.8902\n",
+      "Epoch 164/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 74.5674 - acc: 0.5130 - calc_mre_K: 0.9102 - val_loss: 76.5313 - val_acc: 0.5028 - val_calc_mre_K: 0.9342\n",
+      "Epoch 165/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 75.0059 - acc: 0.5120 - calc_mre_K: 0.9156 - val_loss: 76.3635 - val_acc: 0.4920 - val_calc_mre_K: 0.9322\n",
+      "Epoch 166/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 74.0552 - acc: 0.5089 - calc_mre_K: 0.9040 - val_loss: 73.6186 - val_acc: 0.4838 - val_calc_mre_K: 0.8987\n",
+      "Epoch 167/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 74.5485 - acc: 0.5095 - calc_mre_K: 0.9100 - val_loss: 72.9657 - val_acc: 0.5102 - val_calc_mre_K: 0.8907\n",
+      "Epoch 168/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 74.4642 - acc: 0.5092 - calc_mre_K: 0.9090 - val_loss: 78.9351 - val_acc: 0.4803 - val_calc_mre_K: 0.9636\n",
+      "Epoch 169/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 74.3845 - acc: 0.5075 - calc_mre_K: 0.9080 - val_loss: 76.0692 - val_acc: 0.5122 - val_calc_mre_K: 0.9286\n",
+      "Epoch 170/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 74.2517 - acc: 0.5091 - calc_mre_K: 0.9064 - val_loss: 78.4751 - val_acc: 0.5171 - val_calc_mre_K: 0.9579\n",
+      "Epoch 171/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 73.9866 - acc: 0.5139 - calc_mre_K: 0.9032 - val_loss: 76.5064 - val_acc: 0.4785 - val_calc_mre_K: 0.9339\n",
+      "Epoch 172/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 73.8592 - acc: 0.5097 - calc_mre_K: 0.9016 - val_loss: 75.1546 - val_acc: 0.4708 - val_calc_mre_K: 0.9174\n",
+      "Epoch 173/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 74.0455 - acc: 0.5086 - calc_mre_K: 0.9039 - val_loss: 73.6756 - val_acc: 0.4961 - val_calc_mre_K: 0.8994\n",
+      "Epoch 174/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 74.2372 - acc: 0.5140 - calc_mre_K: 0.9062 - val_loss: 73.1205 - val_acc: 0.5064 - val_calc_mre_K: 0.8926\n",
+      "Epoch 175/500\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "48000/48000 [==============================] - 4s 93us/step - loss: 73.5926 - acc: 0.5109 - calc_mre_K: 0.8983 - val_loss: 71.3396 - val_acc: 0.5327 - val_calc_mre_K: 0.8708\n",
+      "Epoch 176/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 73.7895 - acc: 0.5119 - calc_mre_K: 0.9008 - val_loss: 72.4405 - val_acc: 0.5372 - val_calc_mre_K: 0.8843\n",
+      "Epoch 177/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 73.8055 - acc: 0.5103 - calc_mre_K: 0.9009 - val_loss: 73.2557 - val_acc: 0.4870 - val_calc_mre_K: 0.8942\n",
+      "Epoch 178/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 73.4400 - acc: 0.5076 - calc_mre_K: 0.8965 - val_loss: 73.3387 - val_acc: 0.4958 - val_calc_mre_K: 0.8952\n",
+      "Epoch 179/500\n",
+      "48000/48000 [==============================] - 5s 100us/step - loss: 73.6352 - acc: 0.5100 - calc_mre_K: 0.8989 - val_loss: 72.1694 - val_acc: 0.4588 - val_calc_mre_K: 0.8810\n",
+      "Epoch 180/500\n",
+      "48000/48000 [==============================] - 5s 102us/step - loss: 73.3484 - acc: 0.5085 - calc_mre_K: 0.8954 - val_loss: 74.3762 - val_acc: 0.4756 - val_calc_mre_K: 0.9079\n",
+      "Epoch 181/500\n",
+      "48000/48000 [==============================] - 5s 97us/step - loss: 73.4597 - acc: 0.5064 - calc_mre_K: 0.8967 - val_loss: 72.7168 - val_acc: 0.5091 - val_calc_mre_K: 0.8877\n",
+      "Epoch 182/500\n",
+      "48000/48000 [==============================] - 5s 99us/step - loss: 73.4323 - acc: 0.5092 - calc_mre_K: 0.8964 - val_loss: 74.5860 - val_acc: 0.5164 - val_calc_mre_K: 0.9105\n",
+      "Epoch 183/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 72.7318 - acc: 0.5110 - calc_mre_K: 0.8878 - val_loss: 70.5428 - val_acc: 0.5349 - val_calc_mre_K: 0.8611\n",
+      "Epoch 184/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 72.9283 - acc: 0.5086 - calc_mre_K: 0.8902 - val_loss: 75.1231 - val_acc: 0.4748 - val_calc_mre_K: 0.9170\n",
+      "Epoch 185/500\n",
+      "48000/48000 [==============================] - 5s 102us/step - loss: 72.7743 - acc: 0.5135 - calc_mre_K: 0.8884 - val_loss: 71.9495 - val_acc: 0.5043 - val_calc_mre_K: 0.8783\n",
+      "Epoch 186/500\n",
+      "48000/48000 [==============================] - 5s 101us/step - loss: 72.9175 - acc: 0.5051 - calc_mre_K: 0.8901 - val_loss: 70.9300 - val_acc: 0.4847 - val_calc_mre_K: 0.8658\n",
+      "Epoch 187/500\n",
+      "48000/48000 [==============================] - 5s 102us/step - loss: 72.8237 - acc: 0.5103 - calc_mre_K: 0.8890 - val_loss: 73.8793 - val_acc: 0.4793 - val_calc_mre_K: 0.9018\n",
+      "Epoch 188/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 72.4469 - acc: 0.5091 - calc_mre_K: 0.8844 - val_loss: 76.8523 - val_acc: 0.4911 - val_calc_mre_K: 0.9381\n",
+      "Epoch 189/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 72.6686 - acc: 0.5100 - calc_mre_K: 0.8871 - val_loss: 72.3860 - val_acc: 0.5162 - val_calc_mre_K: 0.8836\n",
+      "Epoch 190/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 72.6394 - acc: 0.5113 - calc_mre_K: 0.8867 - val_loss: 73.3801 - val_acc: 0.5258 - val_calc_mre_K: 0.8958\n",
+      "Epoch 191/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 72.7198 - acc: 0.5099 - calc_mre_K: 0.8877 - val_loss: 71.2865 - val_acc: 0.5029 - val_calc_mre_K: 0.8702\n",
+      "Epoch 192/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 72.3141 - acc: 0.5104 - calc_mre_K: 0.8827 - val_loss: 73.2752 - val_acc: 0.4831 - val_calc_mre_K: 0.8945\n",
+      "Epoch 193/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 72.4821 - acc: 0.5097 - calc_mre_K: 0.8848 - val_loss: 74.4272 - val_acc: 0.5059 - val_calc_mre_K: 0.9085\n",
+      "Epoch 194/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 72.2085 - acc: 0.5115 - calc_mre_K: 0.8815 - val_loss: 72.2919 - val_acc: 0.4912 - val_calc_mre_K: 0.8825\n",
+      "Epoch 195/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 72.2715 - acc: 0.5082 - calc_mre_K: 0.8822 - val_loss: 70.1302 - val_acc: 0.5296 - val_calc_mre_K: 0.8561\n",
+      "Epoch 196/500\n",
+      "48000/48000 [==============================] - 5s 97us/step - loss: 72.3458 - acc: 0.5024 - calc_mre_K: 0.8831 - val_loss: 80.5712 - val_acc: 0.4798 - val_calc_mre_K: 0.9835\n",
+      "Epoch 197/500\n",
+      "48000/48000 [==============================] - 5s 99us/step - loss: 72.0051 - acc: 0.5161 - calc_mre_K: 0.8790 - val_loss: 71.4732 - val_acc: 0.5160 - val_calc_mre_K: 0.8725\n",
+      "Epoch 198/500\n",
+      "48000/48000 [==============================] - 5s 97us/step - loss: 72.1788 - acc: 0.5072 - calc_mre_K: 0.8811 - val_loss: 70.7434 - val_acc: 0.4805 - val_calc_mre_K: 0.8636\n",
+      "Epoch 199/500\n",
+      "48000/48000 [==============================] - 5s 101us/step - loss: 72.1556 - acc: 0.5113 - calc_mre_K: 0.8808 - val_loss: 70.8514 - val_acc: 0.5147 - val_calc_mre_K: 0.8649\n",
+      "Epoch 200/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 71.8135 - acc: 0.5086 - calc_mre_K: 0.8766 - val_loss: 71.4373 - val_acc: 0.4833 - val_calc_mre_K: 0.8720\n",
+      "Epoch 201/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 72.1080 - acc: 0.5105 - calc_mre_K: 0.8802 - val_loss: 69.9839 - val_acc: 0.5120 - val_calc_mre_K: 0.8543\n",
+      "Epoch 202/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 71.6507 - acc: 0.5098 - calc_mre_K: 0.8746 - val_loss: 72.3482 - val_acc: 0.4924 - val_calc_mre_K: 0.8832\n",
+      "Epoch 203/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 72.0461 - acc: 0.5081 - calc_mre_K: 0.8795 - val_loss: 75.7827 - val_acc: 0.4637 - val_calc_mre_K: 0.9251\n",
+      "Epoch 204/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 71.5092 - acc: 0.5090 - calc_mre_K: 0.8729 - val_loss: 75.9759 - val_acc: 0.4483 - val_calc_mre_K: 0.9274\n",
+      "Epoch 205/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 71.6095 - acc: 0.5099 - calc_mre_K: 0.8741 - val_loss: 70.5871 - val_acc: 0.4955 - val_calc_mre_K: 0.8617\n",
+      "Epoch 206/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 71.4623 - acc: 0.5084 - calc_mre_K: 0.8723 - val_loss: 73.2273 - val_acc: 0.5025 - val_calc_mre_K: 0.8939\n",
+      "Epoch 207/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 71.2177 - acc: 0.5095 - calc_mre_K: 0.8694 - val_loss: 70.3571 - val_acc: 0.5135 - val_calc_mre_K: 0.8589\n",
+      "Epoch 208/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 71.6196 - acc: 0.5099 - calc_mre_K: 0.8743 - val_loss: 71.4115 - val_acc: 0.4970 - val_calc_mre_K: 0.8717\n",
+      "Epoch 209/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 71.2342 - acc: 0.5078 - calc_mre_K: 0.8696 - val_loss: 73.0754 - val_acc: 0.5019 - val_calc_mre_K: 0.8920\n",
+      "Epoch 210/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 71.4296 - acc: 0.5081 - calc_mre_K: 0.8719 - val_loss: 71.6825 - val_acc: 0.4992 - val_calc_mre_K: 0.8750\n",
+      "Epoch 211/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 71.2544 - acc: 0.5089 - calc_mre_K: 0.8698 - val_loss: 69.8168 - val_acc: 0.5118 - val_calc_mre_K: 0.8523\n",
+      "Epoch 212/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 70.9378 - acc: 0.5089 - calc_mre_K: 0.8659 - val_loss: 75.2854 - val_acc: 0.5229 - val_calc_mre_K: 0.9190\n",
+      "Epoch 213/500\n",
+      "48000/48000 [==============================] - 4s 83us/step - loss: 71.0606 - acc: 0.5101 - calc_mre_K: 0.8674 - val_loss: 71.9738 - val_acc: 0.4771 - val_calc_mre_K: 0.8786\n",
+      "Epoch 214/500\n",
+      "48000/48000 [==============================] - 4s 83us/step - loss: 71.0853 - acc: 0.5087 - calc_mre_K: 0.8677 - val_loss: 68.7224 - val_acc: 0.5215 - val_calc_mre_K: 0.8389\n",
+      "Epoch 215/500\n",
+      "48000/48000 [==============================] - 4s 82us/step - loss: 71.1411 - acc: 0.5101 - calc_mre_K: 0.8684 - val_loss: 71.2659 - val_acc: 0.5114 - val_calc_mre_K: 0.8699\n",
+      "Epoch 216/500\n",
+      "48000/48000 [==============================] - 4s 83us/step - loss: 70.9014 - acc: 0.5121 - calc_mre_K: 0.8655 - val_loss: 68.8736 - val_acc: 0.5305 - val_calc_mre_K: 0.8407\n",
+      "Epoch 217/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 70.6207 - acc: 0.5132 - calc_mre_K: 0.8621 - val_loss: 69.9096 - val_acc: 0.4893 - val_calc_mre_K: 0.8534\n",
+      "Epoch 218/500\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "48000/48000 [==============================] - 5s 98us/step - loss: 70.5734 - acc: 0.5100 - calc_mre_K: 0.8615 - val_loss: 68.4461 - val_acc: 0.5214 - val_calc_mre_K: 0.8355\n",
+      "Epoch 219/500\n",
+      "48000/48000 [==============================] - 5s 100us/step - loss: 70.7573 - acc: 0.5095 - calc_mre_K: 0.8637 - val_loss: 72.1420 - val_acc: 0.4630 - val_calc_mre_K: 0.8806\n",
+      "Epoch 220/500\n",
+      "48000/48000 [==============================] - 5s 105us/step - loss: 70.6328 - acc: 0.5116 - calc_mre_K: 0.8622 - val_loss: 70.0440 - val_acc: 0.5124 - val_calc_mre_K: 0.8550\n",
+      "Epoch 221/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 70.6602 - acc: 0.5064 - calc_mre_K: 0.8626 - val_loss: 69.4095 - val_acc: 0.5067 - val_calc_mre_K: 0.8473\n",
+      "Epoch 222/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 70.5734 - acc: 0.5116 - calc_mre_K: 0.8615 - val_loss: 69.9488 - val_acc: 0.4865 - val_calc_mre_K: 0.8539\n",
+      "Epoch 223/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 70.2723 - acc: 0.5126 - calc_mre_K: 0.8578 - val_loss: 71.5719 - val_acc: 0.5046 - val_calc_mre_K: 0.8737\n",
+      "Epoch 224/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 70.4415 - acc: 0.5113 - calc_mre_K: 0.8599 - val_loss: 69.6706 - val_acc: 0.4841 - val_calc_mre_K: 0.8505\n",
+      "Epoch 225/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 70.0426 - acc: 0.5118 - calc_mre_K: 0.8550 - val_loss: 70.2994 - val_acc: 0.5225 - val_calc_mre_K: 0.8581\n",
+      "Epoch 226/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 70.5714 - acc: 0.5158 - calc_mre_K: 0.8615 - val_loss: 69.6169 - val_acc: 0.5159 - val_calc_mre_K: 0.8498\n",
+      "Epoch 227/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 70.2296 - acc: 0.5098 - calc_mre_K: 0.8573 - val_loss: 69.3572 - val_acc: 0.4858 - val_calc_mre_K: 0.8466\n",
+      "Epoch 228/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 69.9333 - acc: 0.5141 - calc_mre_K: 0.8537 - val_loss: 73.0055 - val_acc: 0.5109 - val_calc_mre_K: 0.8912\n",
+      "Epoch 229/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 70.0580 - acc: 0.5117 - calc_mre_K: 0.8552 - val_loss: 70.8461 - val_acc: 0.5128 - val_calc_mre_K: 0.8648\n",
+      "Epoch 230/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 70.0059 - acc: 0.5118 - calc_mre_K: 0.8546 - val_loss: 67.7978 - val_acc: 0.5362 - val_calc_mre_K: 0.8276\n",
+      "Epoch 231/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 70.3361 - acc: 0.5122 - calc_mre_K: 0.8586 - val_loss: 70.7487 - val_acc: 0.5072 - val_calc_mre_K: 0.8636\n",
+      "Epoch 232/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 69.7294 - acc: 0.5126 - calc_mre_K: 0.8512 - val_loss: 69.3195 - val_acc: 0.5483 - val_calc_mre_K: 0.8462\n",
+      "Epoch 233/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 69.5621 - acc: 0.5137 - calc_mre_K: 0.8491 - val_loss: 69.0560 - val_acc: 0.5064 - val_calc_mre_K: 0.8430\n",
+      "Epoch 234/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 69.8086 - acc: 0.5083 - calc_mre_K: 0.8522 - val_loss: 68.9760 - val_acc: 0.4894 - val_calc_mre_K: 0.8420\n",
+      "Epoch 235/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 69.6077 - acc: 0.5117 - calc_mre_K: 0.8497 - val_loss: 68.3542 - val_acc: 0.5288 - val_calc_mre_K: 0.8344\n",
+      "Epoch 236/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 69.7521 - acc: 0.5094 - calc_mre_K: 0.8515 - val_loss: 71.4088 - val_acc: 0.5221 - val_calc_mre_K: 0.8717\n",
+      "Epoch 237/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 69.4764 - acc: 0.5136 - calc_mre_K: 0.8481 - val_loss: 71.7303 - val_acc: 0.5384 - val_calc_mre_K: 0.8756\n",
+      "Epoch 238/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 69.5478 - acc: 0.5142 - calc_mre_K: 0.8490 - val_loss: 68.1535 - val_acc: 0.5106 - val_calc_mre_K: 0.8320\n",
+      "Epoch 239/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 69.2040 - acc: 0.5142 - calc_mre_K: 0.8448 - val_loss: 67.4673 - val_acc: 0.5105 - val_calc_mre_K: 0.8236\n",
+      "Epoch 240/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 69.5368 - acc: 0.5147 - calc_mre_K: 0.8488 - val_loss: 72.6004 - val_acc: 0.4777 - val_calc_mre_K: 0.8862\n",
+      "Epoch 241/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 69.0906 - acc: 0.5162 - calc_mre_K: 0.8434 - val_loss: 67.6580 - val_acc: 0.5367 - val_calc_mre_K: 0.8259\n",
+      "Epoch 242/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 69.1507 - acc: 0.5132 - calc_mre_K: 0.8441 - val_loss: 69.6413 - val_acc: 0.5177 - val_calc_mre_K: 0.8501\n",
+      "Epoch 243/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 69.3356 - acc: 0.5128 - calc_mre_K: 0.8464 - val_loss: 70.0220 - val_acc: 0.5212 - val_calc_mre_K: 0.8548\n",
+      "Epoch 244/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 68.9391 - acc: 0.5162 - calc_mre_K: 0.8415 - val_loss: 68.8002 - val_acc: 0.5178 - val_calc_mre_K: 0.8398\n",
+      "Epoch 245/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 69.4595 - acc: 0.5144 - calc_mre_K: 0.8479 - val_loss: 69.3605 - val_acc: 0.5363 - val_calc_mre_K: 0.8467\n",
+      "Epoch 246/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 68.8264 - acc: 0.5155 - calc_mre_K: 0.8402 - val_loss: 69.8590 - val_acc: 0.5050 - val_calc_mre_K: 0.8528\n",
+      "Epoch 247/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 68.9958 - acc: 0.5117 - calc_mre_K: 0.8422 - val_loss: 71.0360 - val_acc: 0.5104 - val_calc_mre_K: 0.8671\n",
+      "Epoch 248/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 68.7445 - acc: 0.5150 - calc_mre_K: 0.8392 - val_loss: 71.8107 - val_acc: 0.4647 - val_calc_mre_K: 0.8766\n",
+      "Epoch 249/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 68.8668 - acc: 0.5165 - calc_mre_K: 0.8407 - val_loss: 70.6469 - val_acc: 0.4763 - val_calc_mre_K: 0.8624\n",
+      "Epoch 250/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 68.5890 - acc: 0.5165 - calc_mre_K: 0.8373 - val_loss: 67.8773 - val_acc: 0.5093 - val_calc_mre_K: 0.8286\n",
+      "Epoch 251/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 68.7970 - acc: 0.5126 - calc_mre_K: 0.8398 - val_loss: 71.7474 - val_acc: 0.5222 - val_calc_mre_K: 0.8758\n",
+      "Epoch 252/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 68.6502 - acc: 0.5107 - calc_mre_K: 0.8380 - val_loss: 68.0442 - val_acc: 0.5326 - val_calc_mre_K: 0.8306\n",
+      "Epoch 253/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 68.6987 - acc: 0.5164 - calc_mre_K: 0.8386 - val_loss: 71.8855 - val_acc: 0.5335 - val_calc_mre_K: 0.8775\n",
+      "Epoch 254/500\n",
+      "48000/48000 [==============================] - 4s 83us/step - loss: 68.4111 - acc: 0.5153 - calc_mre_K: 0.8351 - val_loss: 69.8628 - val_acc: 0.5026 - val_calc_mre_K: 0.8528\n",
+      "Epoch 255/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 68.7604 - acc: 0.5160 - calc_mre_K: 0.8394 - val_loss: 68.1165 - val_acc: 0.5225 - val_calc_mre_K: 0.8315\n",
+      "Epoch 256/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 68.2556 - acc: 0.5186 - calc_mre_K: 0.8332 - val_loss: 67.1617 - val_acc: 0.5399 - val_calc_mre_K: 0.8198\n",
+      "Epoch 257/500\n",
+      "48000/48000 [==============================] - 4s 83us/step - loss: 68.5405 - acc: 0.5171 - calc_mre_K: 0.8367 - val_loss: 68.7027 - val_acc: 0.5222 - val_calc_mre_K: 0.8387\n",
+      "Epoch 258/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 68.2598 - acc: 0.5192 - calc_mre_K: 0.8332 - val_loss: 66.9126 - val_acc: 0.5360 - val_calc_mre_K: 0.8168\n",
+      "Epoch 259/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 68.3030 - acc: 0.5185 - calc_mre_K: 0.8338 - val_loss: 70.5783 - val_acc: 0.4795 - val_calc_mre_K: 0.8616\n",
+      "Epoch 260/500\n",
+      "48000/48000 [==============================] - 4s 83us/step - loss: 68.4196 - acc: 0.5156 - calc_mre_K: 0.8352 - val_loss: 69.0432 - val_acc: 0.5198 - val_calc_mre_K: 0.8428\n",
+      "Epoch 261/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 68.7172 - acc: 0.5180 - calc_mre_K: 0.8388 - val_loss: 72.1518 - val_acc: 0.5055 - val_calc_mre_K: 0.8808\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 262/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 68.2108 - acc: 0.5155 - calc_mre_K: 0.8327 - val_loss: 69.0060 - val_acc: 0.5426 - val_calc_mre_K: 0.8424\n",
+      "Epoch 263/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 68.2075 - acc: 0.5191 - calc_mre_K: 0.8326 - val_loss: 69.1150 - val_acc: 0.4857 - val_calc_mre_K: 0.8437\n",
+      "Epoch 264/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 68.1122 - acc: 0.5200 - calc_mre_K: 0.8314 - val_loss: 66.4298 - val_acc: 0.5577 - val_calc_mre_K: 0.8109\n",
+      "Epoch 265/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 68.1811 - acc: 0.5171 - calc_mre_K: 0.8323 - val_loss: 69.9311 - val_acc: 0.5117 - val_calc_mre_K: 0.8537\n",
+      "Epoch 266/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 68.1196 - acc: 0.5196 - calc_mre_K: 0.8315 - val_loss: 68.3410 - val_acc: 0.5437 - val_calc_mre_K: 0.8342\n",
+      "Epoch 267/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 68.4249 - acc: 0.5187 - calc_mre_K: 0.8353 - val_loss: 67.2816 - val_acc: 0.5489 - val_calc_mre_K: 0.8213\n",
+      "Epoch 268/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 67.7494 - acc: 0.5186 - calc_mre_K: 0.8270 - val_loss: 67.5408 - val_acc: 0.5258 - val_calc_mre_K: 0.8245\n",
+      "Epoch 269/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 67.8745 - acc: 0.5202 - calc_mre_K: 0.8285 - val_loss: 68.2184 - val_acc: 0.5050 - val_calc_mre_K: 0.8327\n",
+      "Epoch 270/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 67.8141 - acc: 0.5238 - calc_mre_K: 0.8278 - val_loss: 66.9309 - val_acc: 0.5066 - val_calc_mre_K: 0.8170\n",
+      "Epoch 271/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 67.9668 - acc: 0.5179 - calc_mre_K: 0.8297 - val_loss: 68.8530 - val_acc: 0.5049 - val_calc_mre_K: 0.8405\n",
+      "Epoch 272/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 67.8425 - acc: 0.5240 - calc_mre_K: 0.8282 - val_loss: 72.1010 - val_acc: 0.5114 - val_calc_mre_K: 0.8801\n",
+      "Epoch 273/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 67.7155 - acc: 0.5218 - calc_mre_K: 0.8266 - val_loss: 67.0138 - val_acc: 0.5131 - val_calc_mre_K: 0.8180\n",
+      "Epoch 274/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 67.8965 - acc: 0.5268 - calc_mre_K: 0.8288 - val_loss: 66.8117 - val_acc: 0.5282 - val_calc_mre_K: 0.8156\n",
+      "Epoch 275/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 67.5682 - acc: 0.5204 - calc_mre_K: 0.8248 - val_loss: 68.4019 - val_acc: 0.5247 - val_calc_mre_K: 0.8350\n",
+      "Epoch 276/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 67.6426 - acc: 0.5206 - calc_mre_K: 0.8257 - val_loss: 68.2580 - val_acc: 0.5376 - val_calc_mre_K: 0.8332\n",
+      "Epoch 277/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 67.4348 - acc: 0.5219 - calc_mre_K: 0.8232 - val_loss: 73.4739 - val_acc: 0.4967 - val_calc_mre_K: 0.8969\n",
+      "Epoch 278/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 67.7097 - acc: 0.5211 - calc_mre_K: 0.8265 - val_loss: 68.2373 - val_acc: 0.5329 - val_calc_mre_K: 0.8330\n",
+      "Epoch 279/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 67.4023 - acc: 0.5249 - calc_mre_K: 0.8228 - val_loss: 67.5524 - val_acc: 0.5262 - val_calc_mre_K: 0.8246\n",
+      "Epoch 280/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 67.3365 - acc: 0.5224 - calc_mre_K: 0.8220 - val_loss: 68.4456 - val_acc: 0.5034 - val_calc_mre_K: 0.8355\n",
+      "Epoch 281/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 67.3260 - acc: 0.5251 - calc_mre_K: 0.8219 - val_loss: 67.8452 - val_acc: 0.5198 - val_calc_mre_K: 0.8282\n",
+      "Epoch 282/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 67.2990 - acc: 0.5180 - calc_mre_K: 0.8215 - val_loss: 67.8376 - val_acc: 0.5107 - val_calc_mre_K: 0.8281\n",
+      "Epoch 283/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 67.4577 - acc: 0.5216 - calc_mre_K: 0.8235 - val_loss: 66.6278 - val_acc: 0.5172 - val_calc_mre_K: 0.8133\n",
+      "Epoch 284/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 67.5761 - acc: 0.5241 - calc_mre_K: 0.8249 - val_loss: 67.3655 - val_acc: 0.5349 - val_calc_mre_K: 0.8223\n",
+      "Epoch 285/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 67.2281 - acc: 0.5207 - calc_mre_K: 0.8207 - val_loss: 69.5470 - val_acc: 0.5249 - val_calc_mre_K: 0.8490\n",
+      "Epoch 286/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 67.6837 - acc: 0.5256 - calc_mre_K: 0.8262 - val_loss: 69.8552 - val_acc: 0.4759 - val_calc_mre_K: 0.8527\n",
+      "Epoch 287/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 67.0632 - acc: 0.5260 - calc_mre_K: 0.8186 - val_loss: 66.5108 - val_acc: 0.5095 - val_calc_mre_K: 0.8119\n",
+      "Epoch 288/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 67.0797 - acc: 0.5235 - calc_mre_K: 0.8188 - val_loss: 68.3970 - val_acc: 0.5234 - val_calc_mre_K: 0.8349\n",
+      "Epoch 289/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 67.2379 - acc: 0.5242 - calc_mre_K: 0.8208 - val_loss: 67.9588 - val_acc: 0.5357 - val_calc_mre_K: 0.8296\n",
+      "Epoch 290/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 67.2487 - acc: 0.5225 - calc_mre_K: 0.8209 - val_loss: 66.8450 - val_acc: 0.5497 - val_calc_mre_K: 0.8160\n",
+      "Epoch 291/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 67.0439 - acc: 0.5261 - calc_mre_K: 0.8184 - val_loss: 70.1276 - val_acc: 0.4843 - val_calc_mre_K: 0.8560\n",
+      "Epoch 292/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 67.0380 - acc: 0.5192 - calc_mre_K: 0.8183 - val_loss: 67.8414 - val_acc: 0.4973 - val_calc_mre_K: 0.8281\n",
+      "Epoch 293/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 66.8291 - acc: 0.5252 - calc_mre_K: 0.8158 - val_loss: 67.7196 - val_acc: 0.5307 - val_calc_mre_K: 0.8267\n",
+      "Epoch 294/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 67.0423 - acc: 0.5212 - calc_mre_K: 0.8184 - val_loss: 66.5205 - val_acc: 0.5338 - val_calc_mre_K: 0.8120\n",
+      "Epoch 295/500\n",
+      "48000/48000 [==============================] - 4s 94us/step - loss: 66.8146 - acc: 0.5264 - calc_mre_K: 0.8156 - val_loss: 67.5495 - val_acc: 0.4968 - val_calc_mre_K: 0.8246\n",
+      "Epoch 296/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 66.8511 - acc: 0.5254 - calc_mre_K: 0.8161 - val_loss: 68.5391 - val_acc: 0.5202 - val_calc_mre_K: 0.8367\n",
+      "Epoch 297/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 66.8410 - acc: 0.5259 - calc_mre_K: 0.8159 - val_loss: 66.6134 - val_acc: 0.5185 - val_calc_mre_K: 0.8132\n",
+      "Epoch 298/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 66.6581 - acc: 0.5246 - calc_mre_K: 0.8137 - val_loss: 69.1278 - val_acc: 0.5024 - val_calc_mre_K: 0.8438\n",
+      "Epoch 299/500\n",
+      "48000/48000 [==============================] - 4s 94us/step - loss: 66.8537 - acc: 0.5306 - calc_mre_K: 0.8161 - val_loss: 66.1631 - val_acc: 0.5297 - val_calc_mre_K: 0.8077\n",
+      "Epoch 300/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 66.5885 - acc: 0.5236 - calc_mre_K: 0.8128 - val_loss: 67.0897 - val_acc: 0.5252 - val_calc_mre_K: 0.8190\n",
+      "Epoch 301/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 66.7008 - acc: 0.5228 - calc_mre_K: 0.8142 - val_loss: 68.3629 - val_acc: 0.5485 - val_calc_mre_K: 0.8345\n",
+      "Epoch 302/500\n",
+      "48000/48000 [==============================] - 4s 94us/step - loss: 66.5060 - acc: 0.5242 - calc_mre_K: 0.8118 - val_loss: 66.0771 - val_acc: 0.5018 - val_calc_mre_K: 0.8066\n",
+      "Epoch 303/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 66.5878 - acc: 0.5257 - calc_mre_K: 0.8128 - val_loss: 69.8067 - val_acc: 0.5186 - val_calc_mre_K: 0.8521\n",
+      "Epoch 304/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 66.6303 - acc: 0.5246 - calc_mre_K: 0.8134 - val_loss: 66.4919 - val_acc: 0.5087 - val_calc_mre_K: 0.8117\n",
+      "Epoch 305/500\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "48000/48000 [==============================] - 4s 93us/step - loss: 66.4958 - acc: 0.5235 - calc_mre_K: 0.8117 - val_loss: 65.6847 - val_acc: 0.5271 - val_calc_mre_K: 0.8018\n",
+      "Epoch 306/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 66.0691 - acc: 0.5272 - calc_mre_K: 0.8065 - val_loss: 66.3119 - val_acc: 0.5327 - val_calc_mre_K: 0.8095\n",
+      "Epoch 307/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 66.4370 - acc: 0.5255 - calc_mre_K: 0.8110 - val_loss: 66.5318 - val_acc: 0.5268 - val_calc_mre_K: 0.8122\n",
+      "Epoch 308/500\n",
+      "48000/48000 [==============================] - 5s 96us/step - loss: 66.3945 - acc: 0.5220 - calc_mre_K: 0.8105 - val_loss: 65.1229 - val_acc: 0.5443 - val_calc_mre_K: 0.7950\n",
+      "Epoch 309/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 66.4299 - acc: 0.5270 - calc_mre_K: 0.8109 - val_loss: 66.9808 - val_acc: 0.5443 - val_calc_mre_K: 0.8176\n",
+      "Epoch 310/500\n",
+      "48000/48000 [==============================] - 5s 97us/step - loss: 65.9804 - acc: 0.5238 - calc_mre_K: 0.8054 - val_loss: 69.2893 - val_acc: 0.5338 - val_calc_mre_K: 0.8458\n",
+      "Epoch 311/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 66.3364 - acc: 0.5268 - calc_mre_K: 0.8098 - val_loss: 65.4976 - val_acc: 0.5056 - val_calc_mre_K: 0.7995\n",
+      "Epoch 312/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 66.1712 - acc: 0.5252 - calc_mre_K: 0.8078 - val_loss: 66.3289 - val_acc: 0.5414 - val_calc_mre_K: 0.8097\n",
+      "Epoch 313/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 65.9269 - acc: 0.5221 - calc_mre_K: 0.8048 - val_loss: 67.8052 - val_acc: 0.5091 - val_calc_mre_K: 0.8277\n",
+      "Epoch 314/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 66.3824 - acc: 0.5271 - calc_mre_K: 0.8103 - val_loss: 68.0080 - val_acc: 0.4647 - val_calc_mre_K: 0.8302\n",
+      "Epoch 315/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 66.1229 - acc: 0.5282 - calc_mre_K: 0.8072 - val_loss: 65.0115 - val_acc: 0.5645 - val_calc_mre_K: 0.7936\n",
+      "Epoch 316/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 66.1937 - acc: 0.5292 - calc_mre_K: 0.8080 - val_loss: 64.9945 - val_acc: 0.5162 - val_calc_mre_K: 0.7934\n",
+      "Epoch 317/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 65.8147 - acc: 0.5291 - calc_mre_K: 0.8034 - val_loss: 65.5184 - val_acc: 0.5329 - val_calc_mre_K: 0.7998\n",
+      "Epoch 318/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 66.0071 - acc: 0.5285 - calc_mre_K: 0.8058 - val_loss: 65.8533 - val_acc: 0.5458 - val_calc_mre_K: 0.8039\n",
+      "Epoch 319/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 66.1298 - acc: 0.5263 - calc_mre_K: 0.8072 - val_loss: 68.1986 - val_acc: 0.5104 - val_calc_mre_K: 0.8325\n",
+      "Epoch 320/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 65.6651 - acc: 0.5318 - calc_mre_K: 0.8016 - val_loss: 64.8207 - val_acc: 0.5055 - val_calc_mre_K: 0.7913\n",
+      "Epoch 321/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 65.8679 - acc: 0.5301 - calc_mre_K: 0.8041 - val_loss: 65.9954 - val_acc: 0.5309 - val_calc_mre_K: 0.8056\n",
+      "Epoch 322/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 65.8290 - acc: 0.5261 - calc_mre_K: 0.8036 - val_loss: 67.1092 - val_acc: 0.5503 - val_calc_mre_K: 0.8192\n",
+      "Epoch 323/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 65.7777 - acc: 0.5269 - calc_mre_K: 0.8030 - val_loss: 65.0085 - val_acc: 0.5164 - val_calc_mre_K: 0.7936\n",
+      "Epoch 324/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 65.5681 - acc: 0.5289 - calc_mre_K: 0.8004 - val_loss: 65.9367 - val_acc: 0.5564 - val_calc_mre_K: 0.8049\n",
+      "Epoch 325/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 65.8868 - acc: 0.5304 - calc_mre_K: 0.8043 - val_loss: 67.1764 - val_acc: 0.5386 - val_calc_mre_K: 0.8200\n",
+      "Epoch 326/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 65.7699 - acc: 0.5288 - calc_mre_K: 0.8029 - val_loss: 67.0963 - val_acc: 0.5228 - val_calc_mre_K: 0.8190\n",
+      "Epoch 327/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 65.9040 - acc: 0.5309 - calc_mre_K: 0.8045 - val_loss: 64.0822 - val_acc: 0.5218 - val_calc_mre_K: 0.7823\n",
+      "Epoch 328/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 65.3745 - acc: 0.5320 - calc_mre_K: 0.7980 - val_loss: 65.4985 - val_acc: 0.5233 - val_calc_mre_K: 0.7995\n",
+      "Epoch 329/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 65.4816 - acc: 0.5287 - calc_mre_K: 0.7993 - val_loss: 66.2387 - val_acc: 0.5197 - val_calc_mre_K: 0.8086\n",
+      "Epoch 330/500\n",
+      "48000/48000 [==============================] - 4s 82us/step - loss: 65.6216 - acc: 0.5301 - calc_mre_K: 0.8010 - val_loss: 67.0101 - val_acc: 0.5241 - val_calc_mre_K: 0.8180\n",
+      "Epoch 331/500\n",
+      "48000/48000 [==============================] - 4s 83us/step - loss: 65.7152 - acc: 0.5265 - calc_mre_K: 0.8022 - val_loss: 66.3821 - val_acc: 0.5187 - val_calc_mre_K: 0.8103\n",
+      "Epoch 332/500\n",
+      "48000/48000 [==============================] - 4s 83us/step - loss: 65.5373 - acc: 0.5306 - calc_mre_K: 0.8000 - val_loss: 64.1404 - val_acc: 0.5207 - val_calc_mre_K: 0.7830\n",
+      "Epoch 333/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 65.5720 - acc: 0.5307 - calc_mre_K: 0.8004 - val_loss: 63.9103 - val_acc: 0.5512 - val_calc_mre_K: 0.7802\n",
+      "Epoch 334/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 65.4199 - acc: 0.5316 - calc_mre_K: 0.7986 - val_loss: 67.6106 - val_acc: 0.5338 - val_calc_mre_K: 0.8253\n",
+      "Epoch 335/500\n",
+      "48000/48000 [==============================] - 4s 83us/step - loss: 65.5175 - acc: 0.5279 - calc_mre_K: 0.7998 - val_loss: 64.9959 - val_acc: 0.5435 - val_calc_mre_K: 0.7934\n",
+      "Epoch 336/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 65.3697 - acc: 0.5351 - calc_mre_K: 0.7980 - val_loss: 73.4226 - val_acc: 0.5008 - val_calc_mre_K: 0.8963\n",
+      "Epoch 337/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 65.4075 - acc: 0.5301 - calc_mre_K: 0.7984 - val_loss: 66.0216 - val_acc: 0.5460 - val_calc_mre_K: 0.8059\n",
+      "Epoch 338/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 65.3658 - acc: 0.5339 - calc_mre_K: 0.7979 - val_loss: 71.0161 - val_acc: 0.5141 - val_calc_mre_K: 0.8669\n",
+      "Epoch 339/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 65.0628 - acc: 0.5307 - calc_mre_K: 0.7942 - val_loss: 64.2607 - val_acc: 0.5192 - val_calc_mre_K: 0.7844\n",
+      "Epoch 340/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 65.2885 - acc: 0.5277 - calc_mre_K: 0.7970 - val_loss: 68.1385 - val_acc: 0.5568 - val_calc_mre_K: 0.8318\n",
+      "Epoch 341/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 65.2020 - acc: 0.5336 - calc_mre_K: 0.7959 - val_loss: 64.4638 - val_acc: 0.5033 - val_calc_mre_K: 0.7869\n",
+      "Epoch 342/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 65.3592 - acc: 0.5317 - calc_mre_K: 0.7978 - val_loss: 66.8454 - val_acc: 0.5439 - val_calc_mre_K: 0.8160\n",
+      "Epoch 343/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 65.1835 - acc: 0.5299 - calc_mre_K: 0.7957 - val_loss: 64.9609 - val_acc: 0.5419 - val_calc_mre_K: 0.7930\n",
+      "Epoch 344/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 65.0741 - acc: 0.5362 - calc_mre_K: 0.7944 - val_loss: 63.9964 - val_acc: 0.5189 - val_calc_mre_K: 0.7812\n",
+      "Epoch 345/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 64.9748 - acc: 0.5329 - calc_mre_K: 0.7931 - val_loss: 63.8704 - val_acc: 0.5628 - val_calc_mre_K: 0.7797\n",
+      "Epoch 346/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 65.2563 - acc: 0.5313 - calc_mre_K: 0.7966 - val_loss: 65.9574 - val_acc: 0.5298 - val_calc_mre_K: 0.8051\n",
+      "Epoch 347/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 64.9228 - acc: 0.5355 - calc_mre_K: 0.7925 - val_loss: 67.6337 - val_acc: 0.5477 - val_calc_mre_K: 0.8256\n",
+      "Epoch 348/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 64.9495 - acc: 0.5320 - calc_mre_K: 0.7928 - val_loss: 64.6126 - val_acc: 0.5392 - val_calc_mre_K: 0.7887\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 349/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 65.1191 - acc: 0.5327 - calc_mre_K: 0.7949 - val_loss: 64.7480 - val_acc: 0.5513 - val_calc_mre_K: 0.7904\n",
+      "Epoch 350/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 65.0700 - acc: 0.5322 - calc_mre_K: 0.7943 - val_loss: 66.3277 - val_acc: 0.5478 - val_calc_mre_K: 0.8097\n",
+      "Epoch 351/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 64.6839 - acc: 0.5369 - calc_mre_K: 0.7896 - val_loss: 67.0021 - val_acc: 0.4902 - val_calc_mre_K: 0.8179\n",
+      "Epoch 352/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 65.0273 - acc: 0.5339 - calc_mre_K: 0.7938 - val_loss: 63.4740 - val_acc: 0.5369 - val_calc_mre_K: 0.7748\n",
+      "Epoch 353/500\n",
+      "48000/48000 [==============================] - 5s 98us/step - loss: 64.5409 - acc: 0.5300 - calc_mre_K: 0.7879 - val_loss: 64.0043 - val_acc: 0.5039 - val_calc_mre_K: 0.7813\n",
+      "Epoch 354/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 64.9948 - acc: 0.5346 - calc_mre_K: 0.7934 - val_loss: 65.6598 - val_acc: 0.5624 - val_calc_mre_K: 0.8015\n",
+      "Epoch 355/500\n",
+      "48000/48000 [==============================] - 5s 98us/step - loss: 64.8481 - acc: 0.5381 - calc_mre_K: 0.7916 - val_loss: 63.9691 - val_acc: 0.5733 - val_calc_mre_K: 0.7809\n",
+      "Epoch 356/500\n",
+      "48000/48000 [==============================] - 5s 96us/step - loss: 64.6918 - acc: 0.5360 - calc_mre_K: 0.7897 - val_loss: 65.4086 - val_acc: 0.5168 - val_calc_mre_K: 0.7984\n",
+      "Epoch 357/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 64.7773 - acc: 0.5381 - calc_mre_K: 0.7907 - val_loss: 64.4929 - val_acc: 0.5126 - val_calc_mre_K: 0.7873\n",
+      "Epoch 358/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 64.8244 - acc: 0.5373 - calc_mre_K: 0.7913 - val_loss: 64.2489 - val_acc: 0.5644 - val_calc_mre_K: 0.7843\n",
+      "Epoch 359/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 64.6375 - acc: 0.5367 - calc_mre_K: 0.7890 - val_loss: 65.3232 - val_acc: 0.5405 - val_calc_mre_K: 0.7974\n",
+      "Epoch 360/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 64.5874 - acc: 0.5356 - calc_mre_K: 0.7884 - val_loss: 65.3026 - val_acc: 0.5333 - val_calc_mre_K: 0.7972\n",
+      "Epoch 361/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 64.7534 - acc: 0.5365 - calc_mre_K: 0.7904 - val_loss: 63.4635 - val_acc: 0.5552 - val_calc_mre_K: 0.7747\n",
+      "Epoch 362/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 64.6451 - acc: 0.5373 - calc_mre_K: 0.7891 - val_loss: 64.1938 - val_acc: 0.5308 - val_calc_mre_K: 0.7836\n",
+      "Epoch 363/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 64.5896 - acc: 0.5338 - calc_mre_K: 0.7884 - val_loss: 64.9814 - val_acc: 0.5327 - val_calc_mre_K: 0.7932\n",
+      "Epoch 364/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 64.5169 - acc: 0.5393 - calc_mre_K: 0.7876 - val_loss: 63.4700 - val_acc: 0.5599 - val_calc_mre_K: 0.7748\n",
+      "Epoch 365/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 64.4768 - acc: 0.5367 - calc_mre_K: 0.7871 - val_loss: 65.3696 - val_acc: 0.5379 - val_calc_mre_K: 0.7980\n",
+      "Epoch 366/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 64.5484 - acc: 0.5389 - calc_mre_K: 0.7879 - val_loss: 67.5539 - val_acc: 0.5725 - val_calc_mre_K: 0.8246\n",
+      "Epoch 367/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 64.5890 - acc: 0.5385 - calc_mre_K: 0.7884 - val_loss: 65.9416 - val_acc: 0.5720 - val_calc_mre_K: 0.8050\n",
+      "Epoch 368/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 64.5501 - acc: 0.5392 - calc_mre_K: 0.7880 - val_loss: 66.4929 - val_acc: 0.4914 - val_calc_mre_K: 0.8117\n",
+      "Epoch 369/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 64.3931 - acc: 0.5388 - calc_mre_K: 0.7860 - val_loss: 63.2603 - val_acc: 0.5443 - val_calc_mre_K: 0.7722\n",
+      "Epoch 370/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 64.5202 - acc: 0.5383 - calc_mre_K: 0.7876 - val_loss: 65.0293 - val_acc: 0.5096 - val_calc_mre_K: 0.7938\n",
+      "Epoch 371/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 64.4078 - acc: 0.5380 - calc_mre_K: 0.7862 - val_loss: 64.9222 - val_acc: 0.5371 - val_calc_mre_K: 0.7925\n",
+      "Epoch 372/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 64.3470 - acc: 0.5411 - calc_mre_K: 0.7855 - val_loss: 62.7564 - val_acc: 0.5335 - val_calc_mre_K: 0.7661\n",
+      "Epoch 373/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 64.2426 - acc: 0.5396 - calc_mre_K: 0.7842 - val_loss: 65.0380 - val_acc: 0.5649 - val_calc_mre_K: 0.7939\n",
+      "Epoch 374/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 64.3259 - acc: 0.5415 - calc_mre_K: 0.7852 - val_loss: 63.5858 - val_acc: 0.5294 - val_calc_mre_K: 0.7762\n",
+      "Epoch 375/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 64.4490 - acc: 0.5403 - calc_mre_K: 0.7867 - val_loss: 64.0360 - val_acc: 0.5341 - val_calc_mre_K: 0.7817\n",
+      "Epoch 376/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 64.1305 - acc: 0.5409 - calc_mre_K: 0.7828 - val_loss: 62.3079 - val_acc: 0.5736 - val_calc_mre_K: 0.7606\n",
+      "Epoch 377/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 64.3056 - acc: 0.5369 - calc_mre_K: 0.7850 - val_loss: 64.5261 - val_acc: 0.5449 - val_calc_mre_K: 0.7877\n",
+      "Epoch 378/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 64.3113 - acc: 0.5416 - calc_mre_K: 0.7850 - val_loss: 64.7911 - val_acc: 0.5002 - val_calc_mre_K: 0.7909\n",
+      "Epoch 379/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 64.1666 - acc: 0.5403 - calc_mre_K: 0.7833 - val_loss: 63.7560 - val_acc: 0.5492 - val_calc_mre_K: 0.7783\n",
+      "Epoch 380/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 64.3239 - acc: 0.5353 - calc_mre_K: 0.7852 - val_loss: 64.3124 - val_acc: 0.5117 - val_calc_mre_K: 0.7851\n",
+      "Epoch 381/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 64.2385 - acc: 0.5402 - calc_mre_K: 0.7842 - val_loss: 64.5276 - val_acc: 0.5613 - val_calc_mre_K: 0.7877\n",
+      "Epoch 382/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 64.0126 - acc: 0.5358 - calc_mre_K: 0.7814 - val_loss: 63.1336 - val_acc: 0.5571 - val_calc_mre_K: 0.7707\n",
+      "Epoch 383/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 64.2490 - acc: 0.5425 - calc_mre_K: 0.7843 - val_loss: 63.4421 - val_acc: 0.5199 - val_calc_mre_K: 0.7744\n",
+      "Epoch 384/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 64.0310 - acc: 0.5391 - calc_mre_K: 0.7816 - val_loss: 63.1400 - val_acc: 0.5872 - val_calc_mre_K: 0.7708\n",
+      "Epoch 385/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.9692 - acc: 0.5419 - calc_mre_K: 0.7809 - val_loss: 62.7680 - val_acc: 0.5798 - val_calc_mre_K: 0.7662\n",
+      "Epoch 386/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 64.1907 - acc: 0.5412 - calc_mre_K: 0.7836 - val_loss: 66.2256 - val_acc: 0.5248 - val_calc_mre_K: 0.8084\n",
+      "Epoch 387/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 64.0641 - acc: 0.5392 - calc_mre_K: 0.7820 - val_loss: 62.5979 - val_acc: 0.5362 - val_calc_mre_K: 0.7641\n",
+      "Epoch 388/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 64.0346 - acc: 0.5426 - calc_mre_K: 0.7817 - val_loss: 64.0542 - val_acc: 0.5614 - val_calc_mre_K: 0.7819\n",
+      "Epoch 389/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 64.0478 - acc: 0.5390 - calc_mre_K: 0.7818 - val_loss: 62.2111 - val_acc: 0.5666 - val_calc_mre_K: 0.7594\n",
+      "Epoch 390/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 63.9371 - acc: 0.5388 - calc_mre_K: 0.7805 - val_loss: 62.6159 - val_acc: 0.5489 - val_calc_mre_K: 0.7644\n",
+      "Epoch 391/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 63.9531 - acc: 0.5416 - calc_mre_K: 0.7807 - val_loss: 63.5771 - val_acc: 0.5757 - val_calc_mre_K: 0.7761\n",
+      "Epoch 392/500\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "48000/48000 [==============================] - 4s 89us/step - loss: 63.9812 - acc: 0.5422 - calc_mre_K: 0.7810 - val_loss: 66.3646 - val_acc: 0.5202 - val_calc_mre_K: 0.8101\n",
+      "Epoch 393/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 64.0568 - acc: 0.5401 - calc_mre_K: 0.7819 - val_loss: 63.6676 - val_acc: 0.5236 - val_calc_mre_K: 0.7772\n",
+      "Epoch 394/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 63.8626 - acc: 0.5412 - calc_mre_K: 0.7796 - val_loss: 63.4561 - val_acc: 0.5449 - val_calc_mre_K: 0.7746\n",
+      "Epoch 395/500\n",
+      "48000/48000 [==============================] - 5s 96us/step - loss: 63.7314 - acc: 0.5402 - calc_mre_K: 0.7780 - val_loss: 61.5841 - val_acc: 0.5626 - val_calc_mre_K: 0.7518\n",
+      "Epoch 396/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 64.0630 - acc: 0.5429 - calc_mre_K: 0.7820 - val_loss: 66.1816 - val_acc: 0.5525 - val_calc_mre_K: 0.8079\n",
+      "Epoch 397/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 63.7009 - acc: 0.5435 - calc_mre_K: 0.7776 - val_loss: 63.0505 - val_acc: 0.5558 - val_calc_mre_K: 0.7697\n",
+      "Epoch 398/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.8731 - acc: 0.5413 - calc_mre_K: 0.7797 - val_loss: 62.6688 - val_acc: 0.5531 - val_calc_mre_K: 0.7650\n",
+      "Epoch 399/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 63.8840 - acc: 0.5381 - calc_mre_K: 0.7798 - val_loss: 63.7208 - val_acc: 0.5389 - val_calc_mre_K: 0.7778\n",
+      "Epoch 400/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 63.8869 - acc: 0.5419 - calc_mre_K: 0.7799 - val_loss: 62.0936 - val_acc: 0.5513 - val_calc_mre_K: 0.7580\n",
+      "Epoch 401/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 63.4683 - acc: 0.5409 - calc_mre_K: 0.7748 - val_loss: 64.6891 - val_acc: 0.5054 - val_calc_mre_K: 0.7897\n",
+      "Epoch 402/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.9841 - acc: 0.5382 - calc_mre_K: 0.7811 - val_loss: 63.7527 - val_acc: 0.5539 - val_calc_mre_K: 0.7782\n",
+      "Epoch 403/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.3985 - acc: 0.5426 - calc_mre_K: 0.7739 - val_loss: 63.7781 - val_acc: 0.5418 - val_calc_mre_K: 0.7785\n",
+      "Epoch 404/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 63.6991 - acc: 0.5418 - calc_mre_K: 0.7776 - val_loss: 62.8818 - val_acc: 0.5628 - val_calc_mre_K: 0.7676\n",
+      "Epoch 405/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 63.6168 - acc: 0.5436 - calc_mre_K: 0.7766 - val_loss: 61.8276 - val_acc: 0.5566 - val_calc_mre_K: 0.7547\n",
+      "Epoch 406/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 63.6571 - acc: 0.5435 - calc_mre_K: 0.7771 - val_loss: 63.7143 - val_acc: 0.5596 - val_calc_mre_K: 0.7778\n",
+      "Epoch 407/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 63.4975 - acc: 0.5425 - calc_mre_K: 0.7751 - val_loss: 63.6091 - val_acc: 0.5402 - val_calc_mre_K: 0.7765\n",
+      "Epoch 408/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 63.5238 - acc: 0.5444 - calc_mre_K: 0.7754 - val_loss: 64.8886 - val_acc: 0.5322 - val_calc_mre_K: 0.7921\n",
+      "Epoch 409/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.6607 - acc: 0.5432 - calc_mre_K: 0.7771 - val_loss: 65.3851 - val_acc: 0.5188 - val_calc_mre_K: 0.7982\n",
+      "Epoch 410/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 63.4639 - acc: 0.5405 - calc_mre_K: 0.7747 - val_loss: 66.0685 - val_acc: 0.5286 - val_calc_mre_K: 0.8065\n",
+      "Epoch 411/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 63.6701 - acc: 0.5445 - calc_mre_K: 0.7772 - val_loss: 63.7400 - val_acc: 0.5594 - val_calc_mre_K: 0.7781\n",
+      "Epoch 412/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 63.3252 - acc: 0.5443 - calc_mre_K: 0.7730 - val_loss: 63.4371 - val_acc: 0.5573 - val_calc_mre_K: 0.7744\n",
+      "Epoch 413/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 63.2443 - acc: 0.5434 - calc_mre_K: 0.7720 - val_loss: 63.5177 - val_acc: 0.5375 - val_calc_mre_K: 0.7754\n",
+      "Epoch 414/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 63.5812 - acc: 0.5409 - calc_mre_K: 0.7761 - val_loss: 63.5588 - val_acc: 0.5566 - val_calc_mre_K: 0.7759\n",
+      "Epoch 415/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.2950 - acc: 0.5465 - calc_mre_K: 0.7726 - val_loss: 65.8164 - val_acc: 0.5233 - val_calc_mre_K: 0.8034\n",
+      "Epoch 416/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 63.2843 - acc: 0.5476 - calc_mre_K: 0.7725 - val_loss: 63.4145 - val_acc: 0.5685 - val_calc_mre_K: 0.7741\n",
+      "Epoch 417/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 63.4371 - acc: 0.5471 - calc_mre_K: 0.7744 - val_loss: 64.2520 - val_acc: 0.5448 - val_calc_mre_K: 0.7843\n",
+      "Epoch 418/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.2231 - acc: 0.5446 - calc_mre_K: 0.7718 - val_loss: 64.9360 - val_acc: 0.5492 - val_calc_mre_K: 0.7927\n",
+      "Epoch 419/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.3718 - acc: 0.5447 - calc_mre_K: 0.7736 - val_loss: 63.2494 - val_acc: 0.5692 - val_calc_mre_K: 0.7721\n",
+      "Epoch 420/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.1882 - acc: 0.5478 - calc_mre_K: 0.7713 - val_loss: 63.2180 - val_acc: 0.5730 - val_calc_mre_K: 0.7717\n",
+      "Epoch 421/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 63.1694 - acc: 0.5456 - calc_mre_K: 0.7711 - val_loss: 64.9662 - val_acc: 0.5632 - val_calc_mre_K: 0.7930\n",
+      "Epoch 422/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 63.2826 - acc: 0.5453 - calc_mre_K: 0.7725 - val_loss: 66.0784 - val_acc: 0.5464 - val_calc_mre_K: 0.8066\n",
+      "Epoch 423/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.1978 - acc: 0.5480 - calc_mre_K: 0.7715 - val_loss: 63.8850 - val_acc: 0.5097 - val_calc_mre_K: 0.7798\n",
+      "Epoch 424/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.2888 - acc: 0.5478 - calc_mre_K: 0.7726 - val_loss: 63.3085 - val_acc: 0.5179 - val_calc_mre_K: 0.7728\n",
+      "Epoch 425/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.0819 - acc: 0.5499 - calc_mre_K: 0.7700 - val_loss: 66.4689 - val_acc: 0.5453 - val_calc_mre_K: 0.8114\n",
+      "Epoch 426/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 63.1534 - acc: 0.5469 - calc_mre_K: 0.7709 - val_loss: 63.7216 - val_acc: 0.5156 - val_calc_mre_K: 0.7779\n",
+      "Epoch 427/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 63.0923 - acc: 0.5428 - calc_mre_K: 0.7702 - val_loss: 63.0729 - val_acc: 0.5262 - val_calc_mre_K: 0.7699\n",
+      "Epoch 428/500\n",
+      "48000/48000 [==============================] - 4s 84us/step - loss: 63.0503 - acc: 0.5480 - calc_mre_K: 0.7697 - val_loss: 64.0143 - val_acc: 0.5393 - val_calc_mre_K: 0.7814\n",
+      "Epoch 429/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 63.0716 - acc: 0.5433 - calc_mre_K: 0.7699 - val_loss: 63.1769 - val_acc: 0.5419 - val_calc_mre_K: 0.7712\n",
+      "Epoch 430/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 63.1196 - acc: 0.5450 - calc_mre_K: 0.7705 - val_loss: 62.7508 - val_acc: 0.5622 - val_calc_mre_K: 0.7660\n",
+      "Epoch 431/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 63.1786 - acc: 0.5470 - calc_mre_K: 0.7712 - val_loss: 62.7403 - val_acc: 0.5818 - val_calc_mre_K: 0.7659\n",
+      "Epoch 432/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 62.9422 - acc: 0.5487 - calc_mre_K: 0.7683 - val_loss: 62.5828 - val_acc: 0.5462 - val_calc_mre_K: 0.7640\n",
+      "Epoch 433/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 62.9373 - acc: 0.5494 - calc_mre_K: 0.7683 - val_loss: 61.2173 - val_acc: 0.5995 - val_calc_mre_K: 0.7473\n",
+      "Epoch 434/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 63.0802 - acc: 0.5490 - calc_mre_K: 0.7700 - val_loss: 63.0379 - val_acc: 0.5218 - val_calc_mre_K: 0.7695\n",
+      "Epoch 435/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 62.7837 - acc: 0.5499 - calc_mre_K: 0.7664 - val_loss: 65.4257 - val_acc: 0.5298 - val_calc_mre_K: 0.7987\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 436/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 62.9617 - acc: 0.5484 - calc_mre_K: 0.7686 - val_loss: 63.1879 - val_acc: 0.5417 - val_calc_mre_K: 0.7713\n",
+      "Epoch 437/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 62.9831 - acc: 0.5476 - calc_mre_K: 0.7688 - val_loss: 64.3852 - val_acc: 0.5223 - val_calc_mre_K: 0.7860\n",
+      "Epoch 438/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 62.9266 - acc: 0.5473 - calc_mre_K: 0.7681 - val_loss: 61.9711 - val_acc: 0.5267 - val_calc_mre_K: 0.7565\n",
+      "Epoch 439/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 62.8513 - acc: 0.5517 - calc_mre_K: 0.7672 - val_loss: 62.9398 - val_acc: 0.5841 - val_calc_mre_K: 0.7683\n",
+      "Epoch 440/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.8628 - acc: 0.5469 - calc_mre_K: 0.7674 - val_loss: 67.3535 - val_acc: 0.5613 - val_calc_mre_K: 0.8222\n",
+      "Epoch 441/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 62.7353 - acc: 0.5435 - calc_mre_K: 0.7658 - val_loss: 62.8966 - val_acc: 0.5655 - val_calc_mre_K: 0.7678\n",
+      "Epoch 442/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.9082 - acc: 0.5480 - calc_mre_K: 0.7679 - val_loss: 61.8805 - val_acc: 0.5427 - val_calc_mre_K: 0.7554\n",
+      "Epoch 443/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.9298 - acc: 0.5500 - calc_mre_K: 0.7682 - val_loss: 65.6287 - val_acc: 0.5370 - val_calc_mre_K: 0.8011\n",
+      "Epoch 444/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.8727 - acc: 0.5515 - calc_mre_K: 0.7675 - val_loss: 61.6960 - val_acc: 0.5603 - val_calc_mre_K: 0.7531\n",
+      "Epoch 445/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.8029 - acc: 0.5508 - calc_mre_K: 0.7666 - val_loss: 63.0715 - val_acc: 0.5570 - val_calc_mre_K: 0.7699\n",
+      "Epoch 446/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.8655 - acc: 0.5472 - calc_mre_K: 0.7674 - val_loss: 61.8871 - val_acc: 0.5653 - val_calc_mre_K: 0.7555\n",
+      "Epoch 447/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 62.7120 - acc: 0.5492 - calc_mre_K: 0.7655 - val_loss: 61.0344 - val_acc: 0.5486 - val_calc_mre_K: 0.7450\n",
+      "Epoch 448/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 62.5996 - acc: 0.5500 - calc_mre_K: 0.7642 - val_loss: 64.6908 - val_acc: 0.5451 - val_calc_mre_K: 0.7897\n",
+      "Epoch 449/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 62.6295 - acc: 0.5495 - calc_mre_K: 0.7645 - val_loss: 62.6194 - val_acc: 0.5782 - val_calc_mre_K: 0.7644\n",
+      "Epoch 450/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 62.6895 - acc: 0.5475 - calc_mre_K: 0.7653 - val_loss: 63.4787 - val_acc: 0.5042 - val_calc_mre_K: 0.7749\n",
+      "Epoch 451/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 62.7757 - acc: 0.5477 - calc_mre_K: 0.7663 - val_loss: 63.1907 - val_acc: 0.5262 - val_calc_mre_K: 0.7714\n",
+      "Epoch 452/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 62.5516 - acc: 0.5459 - calc_mre_K: 0.7636 - val_loss: 62.8555 - val_acc: 0.5532 - val_calc_mre_K: 0.7673\n",
+      "Epoch 453/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 62.7383 - acc: 0.5495 - calc_mre_K: 0.7658 - val_loss: 61.1741 - val_acc: 0.5792 - val_calc_mre_K: 0.7468\n",
+      "Epoch 454/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 62.6556 - acc: 0.5517 - calc_mre_K: 0.7648 - val_loss: 62.1024 - val_acc: 0.5574 - val_calc_mre_K: 0.7581\n",
+      "Epoch 455/500\n",
+      "48000/48000 [==============================] - 4s 85us/step - loss: 62.7385 - acc: 0.5537 - calc_mre_K: 0.7659 - val_loss: 67.5937 - val_acc: 0.5385 - val_calc_mre_K: 0.8251\n",
+      "Epoch 456/500\n",
+      "48000/48000 [==============================] - 4s 87us/step - loss: 62.5694 - acc: 0.5527 - calc_mre_K: 0.7638 - val_loss: 62.4076 - val_acc: 0.5705 - val_calc_mre_K: 0.7618\n",
+      "Epoch 457/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 62.4690 - acc: 0.5508 - calc_mre_K: 0.7626 - val_loss: 61.1385 - val_acc: 0.5647 - val_calc_mre_K: 0.7463\n",
+      "Epoch 458/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 62.6817 - acc: 0.5487 - calc_mre_K: 0.7652 - val_loss: 63.2684 - val_acc: 0.5538 - val_calc_mre_K: 0.7723\n",
+      "Epoch 459/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 62.5856 - acc: 0.5481 - calc_mre_K: 0.7640 - val_loss: 64.5097 - val_acc: 0.5222 - val_calc_mre_K: 0.7875\n",
+      "Epoch 460/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 62.4463 - acc: 0.5516 - calc_mre_K: 0.7623 - val_loss: 63.3747 - val_acc: 0.5695 - val_calc_mre_K: 0.7736\n",
+      "Epoch 461/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 62.6302 - acc: 0.5470 - calc_mre_K: 0.7645 - val_loss: 62.9308 - val_acc: 0.5500 - val_calc_mre_K: 0.7682\n",
+      "Epoch 462/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 62.4683 - acc: 0.5522 - calc_mre_K: 0.7626 - val_loss: 65.2502 - val_acc: 0.5296 - val_calc_mre_K: 0.7965\n",
+      "Epoch 463/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 62.2696 - acc: 0.5508 - calc_mre_K: 0.7601 - val_loss: 68.4771 - val_acc: 0.5469 - val_calc_mre_K: 0.8359\n",
+      "Epoch 464/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 62.5315 - acc: 0.5486 - calc_mre_K: 0.7633 - val_loss: 62.3337 - val_acc: 0.5630 - val_calc_mre_K: 0.7609\n",
+      "Epoch 465/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 62.2666 - acc: 0.5527 - calc_mre_K: 0.7601 - val_loss: 62.2115 - val_acc: 0.5667 - val_calc_mre_K: 0.7594\n",
+      "Epoch 466/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.2846 - acc: 0.5508 - calc_mre_K: 0.7603 - val_loss: 61.7435 - val_acc: 0.5327 - val_calc_mre_K: 0.7537\n",
+      "Epoch 467/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 62.3850 - acc: 0.5493 - calc_mre_K: 0.7615 - val_loss: 61.8067 - val_acc: 0.5828 - val_calc_mre_K: 0.7545\n",
+      "Epoch 468/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 62.1633 - acc: 0.5484 - calc_mre_K: 0.7588 - val_loss: 62.8115 - val_acc: 0.5517 - val_calc_mre_K: 0.7667\n",
+      "Epoch 469/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 62.3095 - acc: 0.5479 - calc_mre_K: 0.7606 - val_loss: 62.8788 - val_acc: 0.5400 - val_calc_mre_K: 0.7676\n",
+      "Epoch 470/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.3636 - acc: 0.5493 - calc_mre_K: 0.7613 - val_loss: 64.1609 - val_acc: 0.5325 - val_calc_mre_K: 0.7832\n",
+      "Epoch 471/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.1930 - acc: 0.5470 - calc_mre_K: 0.7592 - val_loss: 61.8565 - val_acc: 0.5406 - val_calc_mre_K: 0.7551\n",
+      "Epoch 472/500\n",
+      "48000/48000 [==============================] - 4s 86us/step - loss: 62.2352 - acc: 0.5495 - calc_mre_K: 0.7597 - val_loss: 62.3402 - val_acc: 0.5546 - val_calc_mre_K: 0.7610\n",
+      "Epoch 473/500\n",
+      "48000/48000 [==============================] - 4s 88us/step - loss: 62.1978 - acc: 0.5515 - calc_mre_K: 0.7593 - val_loss: 62.2914 - val_acc: 0.5333 - val_calc_mre_K: 0.7604\n",
+      "Epoch 474/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 62.2312 - acc: 0.5513 - calc_mre_K: 0.7597 - val_loss: 63.8005 - val_acc: 0.5766 - val_calc_mre_K: 0.7788\n",
+      "Epoch 475/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 62.2841 - acc: 0.5508 - calc_mre_K: 0.7603 - val_loss: 61.8833 - val_acc: 0.5530 - val_calc_mre_K: 0.7554\n",
+      "Epoch 476/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 61.9403 - acc: 0.5554 - calc_mre_K: 0.7561 - val_loss: 61.7358 - val_acc: 0.5057 - val_calc_mre_K: 0.7536\n",
+      "Epoch 477/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 62.1294 - acc: 0.5510 - calc_mre_K: 0.7584 - val_loss: 61.6813 - val_acc: 0.5846 - val_calc_mre_K: 0.7529\n",
+      "Epoch 478/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 62.2173 - acc: 0.5518 - calc_mre_K: 0.7595 - val_loss: 61.8593 - val_acc: 0.5552 - val_calc_mre_K: 0.7551\n",
+      "Epoch 479/500\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "48000/48000 [==============================] - 4s 91us/step - loss: 61.9569 - acc: 0.5491 - calc_mre_K: 0.7563 - val_loss: 61.9450 - val_acc: 0.5754 - val_calc_mre_K: 0.7562\n",
+      "Epoch 480/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 62.0828 - acc: 0.5542 - calc_mre_K: 0.7578 - val_loss: 61.5555 - val_acc: 0.5553 - val_calc_mre_K: 0.7514\n",
+      "Epoch 481/500\n",
+      "48000/48000 [==============================] - 5s 95us/step - loss: 62.2471 - acc: 0.5496 - calc_mre_K: 0.7599 - val_loss: 61.6619 - val_acc: 0.5622 - val_calc_mre_K: 0.7527\n",
+      "Epoch 482/500\n",
+      "48000/48000 [==============================] - 5s 94us/step - loss: 61.9085 - acc: 0.5514 - calc_mre_K: 0.7557 - val_loss: 61.6090 - val_acc: 0.5518 - val_calc_mre_K: 0.7521\n",
+      "Epoch 483/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 61.8669 - acc: 0.5516 - calc_mre_K: 0.7552 - val_loss: 61.4986 - val_acc: 0.5506 - val_calc_mre_K: 0.7507\n",
+      "Epoch 484/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 62.1775 - acc: 0.5490 - calc_mre_K: 0.7590 - val_loss: 60.8926 - val_acc: 0.5535 - val_calc_mre_K: 0.7433\n",
+      "Epoch 485/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 61.9914 - acc: 0.5523 - calc_mre_K: 0.7567 - val_loss: 61.3315 - val_acc: 0.5763 - val_calc_mre_K: 0.7487\n",
+      "Epoch 486/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 62.0607 - acc: 0.5483 - calc_mre_K: 0.7576 - val_loss: 62.3196 - val_acc: 0.5768 - val_calc_mre_K: 0.7607\n",
+      "Epoch 487/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 62.0490 - acc: 0.5506 - calc_mre_K: 0.7574 - val_loss: 61.5019 - val_acc: 0.5028 - val_calc_mre_K: 0.7508\n",
+      "Epoch 488/500\n",
+      "48000/48000 [==============================] - 4s 90us/step - loss: 61.7370 - acc: 0.5556 - calc_mre_K: 0.7536 - val_loss: 63.0062 - val_acc: 0.5869 - val_calc_mre_K: 0.7691\n",
+      "Epoch 489/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 61.8909 - acc: 0.5535 - calc_mre_K: 0.7555 - val_loss: 61.6162 - val_acc: 0.5173 - val_calc_mre_K: 0.7522\n",
+      "Epoch 490/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 61.8927 - acc: 0.5517 - calc_mre_K: 0.7555 - val_loss: 62.0988 - val_acc: 0.5433 - val_calc_mre_K: 0.7580\n",
+      "Epoch 491/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 61.7267 - acc: 0.5489 - calc_mre_K: 0.7535 - val_loss: 66.1199 - val_acc: 0.5568 - val_calc_mre_K: 0.8071\n",
+      "Epoch 492/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 61.9329 - acc: 0.5524 - calc_mre_K: 0.7560 - val_loss: 60.1876 - val_acc: 0.5375 - val_calc_mre_K: 0.7347\n",
+      "Epoch 493/500\n",
+      "48000/48000 [==============================] - 4s 93us/step - loss: 61.8536 - acc: 0.5521 - calc_mre_K: 0.7550 - val_loss: 60.8207 - val_acc: 0.5505 - val_calc_mre_K: 0.7424\n",
+      "Epoch 494/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 61.7824 - acc: 0.5514 - calc_mre_K: 0.7542 - val_loss: 61.3688 - val_acc: 0.5508 - val_calc_mre_K: 0.7491\n",
+      "Epoch 495/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 61.9479 - acc: 0.5465 - calc_mre_K: 0.7562 - val_loss: 62.4645 - val_acc: 0.5052 - val_calc_mre_K: 0.7625\n",
+      "Epoch 496/500\n",
+      "48000/48000 [==============================] - 4s 92us/step - loss: 61.6786 - acc: 0.5523 - calc_mre_K: 0.7529 - val_loss: 60.7725 - val_acc: 0.5481 - val_calc_mre_K: 0.7419\n",
+      "Epoch 497/500\n",
+      "48000/48000 [==============================] - 4s 91us/step - loss: 61.7278 - acc: 0.5503 - calc_mre_K: 0.7535 - val_loss: 62.8576 - val_acc: 0.5615 - val_calc_mre_K: 0.7673\n",
+      "Epoch 498/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 61.7395 - acc: 0.5499 - calc_mre_K: 0.7537 - val_loss: 63.1650 - val_acc: 0.5298 - val_calc_mre_K: 0.7711\n",
+      "Epoch 499/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 61.7780 - acc: 0.5507 - calc_mre_K: 0.7541 - val_loss: 61.8293 - val_acc: 0.5465 - val_calc_mre_K: 0.7548\n",
+      "Epoch 500/500\n",
+      "48000/48000 [==============================] - 4s 89us/step - loss: 61.6831 - acc: 0.5530 - calc_mre_K: 0.7530 - val_loss: 65.0075 - val_acc: 0.5553 - val_calc_mre_K: 0.7935\n"
+     ]
+    }
+   ],
+   "source": [
+    "x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, test_size=0.2, random_state=42)\n",
+    "history = model.fit(x_t, y_t,\n",
+    "                    batch_size=32,\n",
+    "                    epochs=500, \n",
+    "                    verbose=1,\n",
+    "                    validation_data=(x_v, y_v))\n"
+   ]
+  },
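The log above shows val_loss oscillating around 61–63 for the final ~25 epochs, so most of the 500-epoch budget is spent after convergence. A minimal sketch (not part of this commit) of capping training with Keras early stopping instead of a fixed epoch count:

    # Hypothetical addition, not in the notebook: stop once val_loss stalls.
    from keras.callbacks import EarlyStopping

    early_stop = EarlyStopping(monitor='val_loss', patience=25)
    history = model.fit(x_t, y_t,
                        batch_size=32,
                        epochs=500,
                        verbose=1,
                        validation_data=(x_v, y_v),
                        callbacks=[early_stop])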
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-01T09:03:03.979057Z",
+     "start_time": "2018-09-01T03:46:11.564087Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Fitting 3 folds for each of 6 candidates, totalling 18 fits\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/keras/backend/mxnet_backend.py:89: UserWarning: MXNet Backend performs best with `channels_first` format. Using `channels_last` will significantly reduce performance due to the Transpose operations. For performance improvement, please use this API`keras.utils.to_channels_first(x_input)`to transform `channels_last` data to `channels_first` format and also please change the `image_data_format` in `keras.json` to `channels_first`.Note: `x_input` is a Numpy tensor or a list of Numpy tensorRefer to: https://github.com/awslabs/keras-apache-mxnet/tree/master/docs/mxnet_backend/performance_guide.md\n",
+      "  train_symbol = func(*args, **kwargs)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/keras/backend/mxnet_backend.py:92: UserWarning: MXNet Backend performs best with `channels_first` format. Using `channels_last` will significantly reduce performance due to the Transpose operations. For performance improvement, please use this API`keras.utils.to_channels_first(x_input)`to transform `channels_last` data to `channels_first` format and also please change the `image_data_format` in `keras.json` to `channels_first`.Note: `x_input` is a Numpy tensor or a list of Numpy tensorRefer to: https://github.com/awslabs/keras-apache-mxnet/tree/master/docs/mxnet_backend/performance_guide.md\n",
+      "  test_symbol = func(*args, **kwargs)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/mxnet/module/bucketing_module.py:408: UserWarning: Optimizer created manually outside Module but rescale_grad is not normalized to 1.0/batch_size/num_workers (1.0 vs. 0.03125). Is this intended?\n",
+      "  force_init=force_init)\n",
+      "[...the same three warnings repeat for each of the 18 CV fits (1.0 vs. 0.03125 for batch_size=32, 1.0 vs. 0.015625 for batch_size=64) and once more for the final refit; duplicates trimmed...]\n",
+      "[Parallel(n_jobs=1)]: Done  18 out of  18 | elapsed: 285.2min finished\n"
+     ]
+    }
+   ],
+   "source": [
+    "from sklearn.model_selection import GridSearchCV\n",
+    "from keras.models import Sequential\n",
+    "from keras.layers import Dense\n",
+    "from keras.wrappers.scikit_learn import KerasClassifier\n",
+    "import scnets as scn\n",
+    "#model = KerasClassifier(build_fn=scn.fullycon, in_size=8, out_size=250, N_gpus=1, epochs=500, verbose=0)\n",
+    "\n",
+    "model = KerasClassifier(build_fn=scn.conv1dmodel, \n",
+    "                        in_size=8, \n",
+    "                        out_size=256, \n",
+    "                        ker_size=3,\n",
+    "                        epochs=500, \n",
+    "                        verbose=0)\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "param_grid = dict(ker_size=[3,5,7], batch_size=[32,64])\n",
+    "grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1, verbose=1)\n",
+    "grid_result = grid.fit(x_train, y_train)\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-01T09:03:28.929994Z",
+     "start_time": "2018-09-01T09:03:28.920585Z"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "GridSearchCV(cv=None, error_score='raise',\n",
+       "       estimator=<keras.wrappers.scikit_learn.KerasClassifier object at 0x7f8cb83b3cc0>,\n",
+       "       fit_params=None, iid=True, n_jobs=1,\n",
+       "       param_grid={'ker_size': [3, 5, 7], 'batch_size': [32, 64]},\n",
+       "       pre_dispatch='2*n_jobs', refit=True, return_train_score='warn',\n",
+       "       scoring=None, verbose=1)"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "grid_result"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-09-01T09:03:36.168906Z",
+     "start_time": "2018-09-01T09:03:36.153369Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/sklearn/utils/deprecation.py:122: FutureWarning: You are accessing a training score ('split0_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n",
+      "  warnings.warn(*warn_args, **warn_kwargs)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/sklearn/utils/deprecation.py:122: FutureWarning: You are accessing a training score ('split1_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n",
+      "  warnings.warn(*warn_args, **warn_kwargs)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/sklearn/utils/deprecation.py:122: FutureWarning: You are accessing a training score ('split2_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n",
+      "  warnings.warn(*warn_args, **warn_kwargs)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/sklearn/utils/deprecation.py:122: FutureWarning: You are accessing a training score ('mean_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n",
+      "  warnings.warn(*warn_args, **warn_kwargs)\n",
+      "/home/hegder/anaconda3/envs/deep/lib/python3.6/site-packages/sklearn/utils/deprecation.py:122: FutureWarning: You are accessing a training score ('std_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n",
+      "  warnings.warn(*warn_args, **warn_kwargs)\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "{'mean_fit_time': array([1236.77363722, 1263.8373781 , 1283.07971772,  617.23694984,\n",
+       "         644.64875857,  630.75466394]),\n",
+       " 'std_fit_time': array([15.23634435,  1.04774932, 70.7173362 , 19.87266061,  3.13235316,\n",
+       "        19.13357172]),\n",
+       " 'mean_score_time': array([1.9509182 , 2.09144211, 2.07234033, 1.05850196, 1.09700545,\n",
+       "        1.07024908]),\n",
+       " 'std_score_time': array([0.09494565, 0.09207867, 0.09335411, 0.04954752, 0.04209056,\n",
+       "        0.05320864]),\n",
+       " 'param_batch_size': masked_array(data=[32, 32, 32, 64, 64, 64],\n",
+       "              mask=[False, False, False, False, False, False],\n",
+       "        fill_value='?',\n",
+       "             dtype=object),\n",
+       " 'param_ker_size': masked_array(data=[3, 5, 7, 3, 5, 7],\n",
+       "              mask=[False, False, False, False, False, False],\n",
+       "        fill_value='?',\n",
+       "             dtype=object),\n",
+       " 'params': [{'batch_size': 32, 'ker_size': 3},\n",
+       "  {'batch_size': 32, 'ker_size': 5},\n",
+       "  {'batch_size': 32, 'ker_size': 7},\n",
+       "  {'batch_size': 64, 'ker_size': 3},\n",
+       "  {'batch_size': 64, 'ker_size': 5},\n",
+       "  {'batch_size': 64, 'ker_size': 7}],\n",
+       " 'split0_test_score': array([0.00225, 0.0017 , 0.0027 , 0.00225, 0.00205, 0.0024 ]),\n",
+       " 'split1_test_score': array([0.0021 , 0.0023 , 0.0023 , 0.0019 , 0.002  , 0.00165]),\n",
+       " 'split2_test_score': array([0.00325, 0.00215, 0.0021 , 0.00245, 0.0024 , 0.0024 ]),\n",
+       " 'mean_test_score': array([0.00253333, 0.00205   , 0.00236667, 0.0022    , 0.00215   ,\n",
+       "        0.00215   ]),\n",
+       " 'std_test_score': array([0.00051045, 0.00025495, 0.00024944, 0.0002273 , 0.00017795,\n",
+       "        0.00035355]),\n",
+       " 'rank_test_score': array([1, 6, 2, 3, 4, 4], dtype=int32),\n",
+       " 'split0_train_score': array([0.001875, 0.001625, 0.002525, 0.002175, 0.0019  , 0.002275]),\n",
+       " 'split1_train_score': array([0.0027  , 0.00275 , 0.00245 , 0.0022  , 0.002125, 0.002475]),\n",
+       " 'split2_train_score': array([0.0024  , 0.00205 , 0.0019  , 0.0021  , 0.00225 , 0.001775]),\n",
+       " 'mean_train_score': array([0.002325  , 0.00214167, 0.00229167, 0.00215833, 0.00209167,\n",
+       "        0.002175  ]),\n",
+       " 'std_train_score': array([3.40954542e-04, 4.63830668e-04, 2.78637558e-04, 4.24918293e-05,\n",
+       "        1.44817893e-04, 2.94392029e-04])}"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "grid_result.cv_results_"
+   ]
+  },
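The raw cv_results_ dump above is hard to scan. A short sketch (assuming the grid_result object from the cell above) that prints the best configuration and a per-candidate summary; because the wrapper is KerasClassifier, mean_test_score here is classification accuracy, which is why the values sit near 0.002:

    # Sketch, assuming grid_result from the GridSearchCV cell above.
    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, grid_result.cv_results_['params']):
        print("%f (+/- %f) with %r" % (mean, std, params))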
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "{'mean_fit_time': array([ 893.33654523,  898.53078222, 1119.14394736, 1130.1775128 ,\n",
+    "         956.51246222,  964.45365715, 1209.3984166 , 1582.91039157,\n",
+    "        1394.26560704, 1616.26630108, 1266.47200227, 1116.83488099,\n",
+    "        1205.42738708, 1201.92515103, 1210.92550143]),\n",
+    " 'std_fit_time': array([  3.08891285,   6.81113186, 212.20371026, 238.93357922,\n",
+    "          6.97112622,  18.87349827, 223.00445851,  57.7875855 ,\n",
+    "        100.70476936,  43.95356933, 134.83849082,  11.29690679,\n",
+    "          5.42330543,   6.19267952,   4.92641743]),\n",
+    " 'mean_score_time': array([1.63011034, 1.61093879, 1.66868186, 1.48953597, 1.43644238,\n",
+    "        1.57432818, 1.69542178, 1.78519742, 1.65484571, 1.89959979,\n",
+    "        1.76437203, 1.6481727 , 1.7160941 , 1.75274881, 1.71138732]),\n",
+    " 'std_score_time': array([0.02974069, 0.08114865, 0.04441992, 0.06436249, 0.04163585,\n",
+    "        0.03811946, 0.09529008, 0.0232277 , 0.0417081 , 0.07397102,\n",
+    "        0.14329706, 0.06546494, 0.07330068, 0.05566151, 0.052498  ]),\n",
+    " 'param_N_hidden': masked_array(data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5],\n",
+    "              mask=[False, False, False, False, False, False, False, False,\n",
+    "                    False, False, False, False, False, False, False],\n",
+    "        fill_value='?',\n",
+    "             dtype=object),\n",
+    " 'param_N_neurons': masked_array(data=[250, 500, 1000, 250, 500, 1000, 250, 500, 1000, 250,\n",
+    "                    500, 1000, 250, 500, 1000],\n",
+    "              mask=[False, False, False, False, False, False, False, False,\n",
+    "                    False, False, False, False, False, False, False],\n",
+    "        fill_value='?',\n",
+    "             dtype=object),\n",
+    " 'params': [{'N_hidden': 1, 'N_neurons': 250},\n",
+    "  {'N_hidden': 1, 'N_neurons': 500},\n",
+    "  {'N_hidden': 1, 'N_neurons': 1000},\n",
+    "  {'N_hidden': 2, 'N_neurons': 250},\n",
+    "  {'N_hidden': 2, 'N_neurons': 500},\n",
+    "  {'N_hidden': 2, 'N_neurons': 1000},\n",
+    "  {'N_hidden': 3, 'N_neurons': 250},\n",
+    "  {'N_hidden': 3, 'N_neurons': 500},\n",
+    "  {'N_hidden': 3, 'N_neurons': 1000},\n",
+    "  {'N_hidden': 4, 'N_neurons': 250},\n",
+    "  {'N_hidden': 4, 'N_neurons': 500},\n",
+    "  {'N_hidden': 4, 'N_neurons': 1000},\n",
+    "  {'N_hidden': 5, 'N_neurons': 250},\n",
+    "  {'N_hidden': 5, 'N_neurons': 500},\n",
+    "  {'N_hidden': 5, 'N_neurons': 1000}],\n",
+    " 'split0_test_score': array([0.00235, 0.00225, 0.0025 , 0.0024 , 0.0026 , 0.0021 , 0.00165,\n",
+    "        0.0023 , 0.00255, 0.00255, 0.0024 , 0.0027 , 0.0021 , 0.00225,\n",
+    "        0.0022 ]),\n",
+    " 'split1_test_score': array([0.0024 , 0.00225, 0.0022 , 0.0022 , 0.00235, 0.0022 , 0.0021 ,\n",
+    "        0.00195, 0.00215, 0.0022 , 0.00185, 0.00195, 0.002  , 0.00195,\n",
+    "        0.0021 ]),\n",
+    " 'split2_test_score': array([0.00255, 0.00315, 0.00275, 0.00295, 0.00355, 0.0032 , 0.00275,\n",
+    "        0.0031 , 0.0028 , 0.00305, 0.00305, 0.00355, 0.00275, 0.00285,\n",
+    "        0.00305]),\n",
+    " 'mean_test_score': array([0.00243333, 0.00255   , 0.00248333, 0.00251667, 0.00283333,\n",
+    "        0.0025    , 0.00216667, 0.00245   , 0.0025    , 0.0026    ,\n",
+    "        0.00243333, 0.00273333, 0.00228333, 0.00235   , 0.00245   ]),\n",
+    " 'std_test_score': array([8.49836586e-05, 4.24264069e-04, 2.24845626e-04, 3.17104960e-04,\n",
+    "        5.16935414e-04, 4.96655481e-04, 4.51540573e-04, 4.81317636e-04,\n",
+    "        2.67706307e-04, 3.48807492e-04, 4.90464632e-04, 6.53622385e-04,\n",
+    "        3.32498956e-04, 3.74165739e-04, 4.26223728e-04]),\n",
+    " 'rank_test_score': array([11,  4,  8,  5,  1,  6, 15,  9,  6,  3, 11,  2, 14, 13,  9],\n",
+    "       dtype=int32),\n",
+    " 'split0_train_score': array([0.00255 , 0.0024  , 0.0023  , 0.00255 , 0.00255 , 0.002375,\n",
+    "        0.002125, 0.002625, 0.0025  , 0.002775, 0.002625, 0.0027  ,\n",
+    "        0.0023  , 0.00245 , 0.002275]),\n",
+    " 'split1_train_score': array([0.002975, 0.0025  , 0.00255 , 0.0028  , 0.0029  , 0.002475,\n",
+    "        0.002575, 0.0027  , 0.002575, 0.002375, 0.002475, 0.002475,\n",
+    "        0.002325, 0.002425, 0.002375]),\n",
+    " 'split2_train_score': array([0.00215 , 0.0022  , 0.00215 , 0.002375, 0.00205 , 0.002175,\n",
+    "        0.00215 , 0.0022  , 0.002   , 0.00235 , 0.00235 , 0.002525,\n",
+    "        0.00215 , 0.002175, 0.002175]),\n",
+    " 'mean_train_score': array([0.00255833, 0.00236667, 0.00233333, 0.002575  , 0.0025    ,\n",
+    "        0.00234167, 0.00228333, 0.00250833, 0.00235833, 0.0025    ,\n",
+    "        0.00248333, 0.00256667, 0.00225833, 0.00235   , 0.002275  ]),\n",
+    " 'std_train_score': array([3.36856382e-04, 1.24721913e-04, 1.64991582e-04, 1.74403746e-04,\n",
+    "        3.48807492e-04, 1.24721913e-04, 2.06491862e-04, 2.20164080e-04,\n",
+    "        2.55223214e-04, 1.94722024e-04, 1.12422813e-04, 9.64653075e-05,\n",
+    "        7.72801541e-05, 1.24163870e-04, 8.16496581e-05])}\n",
+    "        \n",
+    " \n",
+    " {'mean_fit_time': array([  685.01906315,  1809.28454868,   336.60541034,   878.23016135]),\n",
+    " 'mean_score_time': array([ 1.38006322,  1.27389534,  0.6934317 ,  0.69225407]),\n",
+    " 'mean_test_score': array([ 0.00241667,  0.00251667,  0.00243333,  0.00261667]),\n",
+    " 'mean_train_score': array([ 0.00245833,  0.00236667,  0.00248333,  0.00253333]),\n",
+    " 'param_batch_size': masked_array(data = [32 32 64 64],\n",
+    "              mask = [False False False False],\n",
+    "        fill_value = ?),\n",
+    " 'param_epochs': masked_array(data = [200 500 200 500],\n",
+    "              mask = [False False False False],\n",
+    "        fill_value = ?),\n",
+    " 'params': ({'batch_size': 32, 'epochs': 200},\n",
+    "  {'batch_size': 32, 'epochs': 500},\n",
+    "  {'batch_size': 64, 'epochs': 200},\n",
+    "  {'batch_size': 64, 'epochs': 500}),\n",
+    " 'rank_test_score': array([4, 2, 3, 1], dtype=int32),\n",
+    " 'split0_test_score': array([ 0.0021 ,  0.00225,  0.00215,  0.00225]),\n",
+    " 'split0_train_score': array([ 0.00235 ,  0.0023  ,  0.002625,  0.002575]),\n",
+    " 'split1_test_score': array([ 0.00225,  0.00225,  0.00215,  0.00235]),\n",
+    " 'split1_train_score': array([ 0.002675,  0.002725,  0.002675,  0.002825]),\n",
+    " 'split2_test_score': array([ 0.0029 ,  0.00305,  0.003  ,  0.00325]),\n",
+    " 'split2_train_score': array([ 0.00235 ,  0.002075,  0.00215 ,  0.0022  ]),\n",
+    " 'std_fit_time': array([  27.85582158,  121.41697465,    1.58335506,   11.64839192]),\n",
+    " 'std_score_time': array([ 0.01602076,  0.06291871,  0.03384719,  0.05541393]),\n",
+    " 'std_test_score': array([ 0.00034721,  0.00037712,  0.00040069,  0.00044969]),\n",
+    " 'std_train_score': array([ 0.00015321,  0.00026952,  0.00023658,  0.00025685])}\n",
+    "        \n",
+    "        \n",
+    "        'mean_fit_time': array([1236.77363722, 1263.8373781 , 1283.07971772,  617.23694984,\n",
+    "         644.64875857,  630.75466394]),\n",
+    " 'std_fit_time': array([15.23634435,  1.04774932, 70.7173362 , 19.87266061,  3.13235316,\n",
+    "        19.13357172]),\n",
+    " 'mean_score_time': array([1.9509182 , 2.09144211, 2.07234033, 1.05850196, 1.09700545,\n",
+    "        1.07024908]),\n",
+    " 'std_score_time': array([0.09494565, 0.09207867, 0.09335411, 0.04954752, 0.04209056,\n",
+    "        0.05320864]),\n",
+    " 'param_batch_size': masked_array(data=[32, 32, 32, 64, 64, 64],\n",
+    "              mask=[False, False, False, False, False, False],\n",
+    "        fill_value='?',\n",
+    "             dtype=object),\n",
+    " 'param_ker_size': masked_array(data=[3, 5, 7, 3, 5, 7],\n",
+    "              mask=[False, False, False, False, False, False],\n",
+    "        fill_value='?',\n",
+    "             dtype=object),\n",
+    " 'params': [{'batch_size': 32, 'ker_size': 3},\n",
+    "  {'batch_size': 32, 'ker_size': 5},\n",
+    "  {'batch_size': 32, 'ker_size': 7},\n",
+    "  {'batch_size': 64, 'ker_size': 3},\n",
+    "  {'batch_size': 64, 'ker_size': 5},\n",
+    "  {'batch_size': 64, 'ker_size': 7}],\n",
+    " 'split0_test_score': array([0.00225, 0.0017 , 0.0027 , 0.00225, 0.00205, 0.0024 ]),\n",
+    " 'split1_test_score': array([0.0021 , 0.0023 , 0.0023 , 0.0019 , 0.002  , 0.00165]),\n",
+    " 'split2_test_score': array([0.00325, 0.00215, 0.0021 , 0.00245, 0.0024 , 0.0024 ]),\n",
+    " 'mean_test_score': array([0.00253333, 0.00205   , 0.00236667, 0.0022    , 0.00215   ,\n",
+    "        0.00215   ]),\n",
+    " 'std_test_score': array([0.00051045, 0.00025495, 0.00024944, 0.0002273 , 0.00017795,\n",
+    "        0.00035355]),\n",
+    " 'rank_test_score': array([1, 6, 2, 3, 4, 4], dtype=int32),\n",
+    " 'split0_train_score': array([0.001875, 0.001625, 0.002525, 0.002175, 0.0019  , 0.002275]),\n",
+    " 'split1_train_score': array([0.0027  , 0.00275 , 0.00245 , 0.0022  , 0.002125, 0.002475]),\n",
+    " 'split2_train_score': array([0.0024  , 0.00205 , 0.0019  , 0.0021  , 0.00225 , 0.001775]),\n",
+    " 'mean_train_score': array([0.002325  , 0.00214167, 0.00229167, 0.00215833, 0.00209167,\n",
+    "        0.002175  ]),\n",
+    " 'std_train_score': array([3.40954542e-04, 4.63830668e-04, 2.78637558e-04, 4.24918293e-05,\n",
+    "        1.44817893e-04, 2.94392029e-04])}\n",
+    "        "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

File diff suppressed because it is too large
+ 1257 - 0
.ipynb_checkpoints/inver_sca-checkpoint.ipynb


+ 106 - 0
.ipynb_checkpoints/inverse_trials-checkpoint.ipynb

@@ -0,0 +1,106 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 196,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-08-24T15:02:25.827112Z",
+     "start_time": "2018-08-24T15:02:25.721467Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import mxnet as mx\n",
+    "from mxnet import nd, autograd, gluon\n",
+    "mx.random.seed(1)\n",
+    "import snlay as snlay\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "\n",
+    "lam_min = 300\n",
+    "lam_max = 1200\n",
+    "size_min = 30\n",
+    "size_max = 70\n",
+    "num_lpoints = 250\n",
+    "\n",
+    "\n",
+    "lams = np.linspace(lam_min, lam_max, num_lpoints)\n",
+    "mats=[3,4,3,4,3,4,3,4]\n",
+    "sizes=np.array([35, 45, 35, 45, 35, 45, 35, 45])\n",
+    "\n",
+    "sz = nd.array(sizes).reshape((8,1))\n",
+    "\n",
+    "spec_calc = snlay.calc_spectrum(sizes, mats, lams)\n",
+    "#plt.plot(lams, spec_calc)\n",
+    "\n",
+    "\n",
+    "\n",
+    "params = [sz]\n",
+    "\n",
+    "\n",
+    "for param in params:\n",
+    "    param.attach_grad()\n",
+    "params\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "def net2(X):\n",
+    "    return nd.abs(nd.sum(sz) - 16)\n",
+    "        \n",
+    "def square_loss2(yhat, y):\n",
+    "    return nd.abs(yhat - y)\n",
+    "\n",
+    "def SGD(params, lr):\n",
+    "    for param in params:\n",
+    "        param[:] = param - lr * param.grad\n",
+    "\n",
+    "epochs = 10\n",
+    "learning_rate = .01\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "for ind in range(200):\n",
+    "    #print(ind)\n",
+    "    with autograd.record():\n",
+    "        loss = net2(ind)\n",
+    "    loss.backward()\n",
+    "    SGD(params, learning_rate)\n",
+    "    \n",
+    "\n",
+    "sz2 = np.array(sz)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
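The inverse_trials checkpoint above gradient-descends on the size vector against the toy objective |sum(sz) − 16|; as committed, net2 ignores its argument and square_loss2/epochs are never used. A trimmed sketch of the same loop (assumes mxnet; toy objective unchanged):

    # Sketch of the same toy inverse-design loop, with the unused pieces dropped.
    from mxnet import nd, autograd

    sz = nd.array([35, 45, 35, 45, 35, 45, 35, 45]).reshape((8, 1))
    sz.attach_grad()

    lr = 0.01
    for step in range(200):
        with autograd.record():
            loss = nd.abs(nd.sum(sz) - 16)   # toy objective from the cell
        loss.backward()
        sz[:] = sz - lr * sz.grad            # in-place SGD step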

File diff suppressed because it is too large
+ 41 - 0
.ipynb_checkpoints/pymietest-checkpoint.ipynb


File diff suppressed because it is too large
+ 1161 - 0
.ipynb_checkpoints/resnets-checkpoint.ipynb


File diff suppressed because it is too large
+ 1283 - 0
.ipynb_checkpoints/scatternet-checkpoint.ipynb


+ 6 - 0
.ipynb_checkpoints/test-checkpoint.ipynb

@@ -0,0 +1,6 @@
+{
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

BIN
__pycache__/scnets.cpython-36.pyc


BIN
datasets/s8_sio2tio2_v2.h5


File diff suppressed because it is too large
+ 652 - 4821
fully_con_hyp_tuning.ipynb


File diff suppressed because it is too large
+ 489 - 804
scatternet.ipynb


+ 42 - 7
scnets.py

@@ -1,6 +1,8 @@
 from keras import backend as K
 from keras.models import Sequential, Model
 from keras.layers import Dense, Dropout
+from keras.layers import Reshape, UpSampling1D, Conv1D
+from keras.layers import Flatten, Activation
 from keras.utils import np_utils, multi_gpu_model
 from keras.regularizers import l2
 from keras.wrappers.scikit_learn import KerasRegressor
@@ -8,11 +10,15 @@ from keras.optimizers import Adam
 import numpy as np
 import matplotlib.pyplot as plt
 
-#function to test performance on testset  
+#function to test performance on testset
 def calc_mre(y_true, y_pred):
     y_err = 100*np.abs(y_true - y_pred)/y_true
     return np.mean(y_err)
 
+#Keras-backend version of calc_mre, usable as a metric during training
+def calc_mre_K(y_true, y_pred):
+    y_err = 100*np.abs(y_true - y_pred)/y_true
+    return K.mean(y_err)
 
 #naive percentage loss
 def relerr_loss(y_true, y_pred):
@@ -20,6 +26,35 @@ def relerr_loss(y_true, y_pred):
     y_err_f = K.flatten(y_err)
     return K.sum(y_err_f)
 
+def conv1dmodel(in_size=8, out_size=256, ker_size=3):
+    # dense front end followed by a 1D convolutional decoder
+    model = Sequential()
+    # expand the 8 normalized sizes into a 256-long feature vector
+    model.add(Dense(out_size, input_dim=in_size,
+        kernel_initializer='normal',
+        name='first'))
+    model.add(Activation('relu'))
+    # reshape to (4, 64), then upsample along the length axis to (8, 64)
+    model.add(Reshape((4, 64), name='Reshape1'))
+    model.add(UpSampling1D(size=2, name='Up1'))
+
+    model.add(Conv1D(filters=64,
+        kernel_size=ker_size, strides=1, padding='same',
+        dilation_rate=1, name='Conv1',
+        kernel_initializer='normal'))
+    model.add(Activation('relu'))
+
+    model.add(Conv1D(filters=32,
+        kernel_size=ker_size, strides=1, padding='same',
+        dilation_rate=1, name='Conv2',
+        kernel_initializer='normal'))
+    model.add(Activation('relu'))
+
+    # flatten (8, 32) back into a single 256-point spectrum
+    model.add(Flatten())
+    model.compile(loss=relerr_loss, optimizer='adam', metrics=['accuracy', calc_mre_K])
+    return model
+
 def fullycon( in_size=8, out_size=250, N_hidden=3, N_neurons=250, N_gpus=1):
     """
     Returns a fully-connected model which will take a normalized size vector and return a
@@ -30,14 +65,14 @@ def fullycon( in_size=8, out_size=250, N_hidden=3, N_neurons=250, N_gpus=1):
     N_neurons: number of neurons in each of the hidden layers
     """
     model = Sequential()
-    model.add(Dense(out_size, input_dim=in_size, kernel_initializer='normal', activation='relu', 
-                    name='first' ))
+    model.add(Dense(out_size, input_dim=in_size, kernel_initializer='normal', activation='relu',
+        name='first' ))
     for h in np.arange(N_hidden):
         lname = "H"+str(h)
         model.add(Dense(N_neurons, kernel_initializer='normal', activation='relu', name=lname ))
-    
+
     model.add(Dense(out_size, kernel_initializer='normal', name='last'))
-    
+
     # Compile model
     if N_gpus == 1:
         model.compile(loss=relerr_loss, optimizer='adam', metrics=['accuracy'])
@@ -46,13 +81,13 @@ def fullycon( in_size=8, out_size=250, N_hidden=3, N_neurons=250, N_gpus=1):
         model.compile(loss=relerr_loss, optimizer='adam', metrics=['accuracy'], context = gpu_list)
     return model
 
-#staging area for new models 
+#staging area for new models
 def plot_training_history(history, red_factor):
     loss, val_loss = history.history['loss'], history.history['val_loss']
     loss = np.asarray(loss)/red_factor
     val_loss = np.asarray(val_loss)/red_factor
     epochs = len(loss)
-    
+
     fig, axs = plt.subplots(1,1, figsize=(5,5))
     axs.semilogy(np.arange(1, epochs + 1), loss, label='train error')
     axs.semilogy(np.arange(1, epochs + 1), val_loss, label='validation error')
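
As a quick sanity check of the new conv1dmodel, the sketch below wires it to the arrays prepared in fully_con_hyp_tuning.ipynb (x_train of shape (N, 8), y_train of shape (N, 256)). The epoch count, batch size and validation split are illustrative placeholders, not tuned values from this commit.

    from scnets import conv1dmodel, calc_mre

    # out_size must stay 256 here: the Reshape layer hard-codes (4, 64) = 256 units
    model = conv1dmodel(in_size=8, out_size=256, ker_size=3)
    model.summary()

    # shape flow: Dense -> (256,) -> Reshape -> (4, 64) -> UpSampling1D -> (8, 64)
    #             Conv1 -> (8, 64) -> Conv2 -> (8, 32) -> Flatten -> (256,)
    history = model.fit(x_train, y_train, epochs=50, batch_size=32,
                        validation_split=0.2, verbose=1)

    # numpy-side mean relative error on the held-out set
    y_pred = model.predict(x_test)
    print(calc_mre(y_test, y_pred))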

Some files were not shown because too many files changed in this diff