From 29646ecb440ba417a94bca0aff5952dfbb9496e7 Mon Sep 17 00:00:00 2001
From: wizardbc
Date: Sat, 5 May 2018 02:30:16 +0900
Subject: [PATCH] Update lab-11-5-mnist_cnn_ensemble_layers.py

dropout rate = 1 - keep_prob
---
 lab-11-5-mnist_cnn_ensemble_layers.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lab-11-5-mnist_cnn_ensemble_layers.py b/lab-11-5-mnist_cnn_ensemble_layers.py
index 81face62..836e0792 100644
--- a/lab-11-5-mnist_cnn_ensemble_layers.py
+++ b/lab-11-5-mnist_cnn_ensemble_layers.py
@@ -44,7 +44,7 @@ def _build_net(self):
             pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],
                                             padding="SAME", strides=2)
             dropout1 = tf.layers.dropout(inputs=pool1,
-                                         rate=0.7, training=self.training)
+                                         rate=0.3, training=self.training)
 
             # Convolutional Layer #2 and Pooling Layer #2
             conv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3, 3],
@@ -52,7 +52,7 @@ def _build_net(self):
             pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2],
                                             padding="SAME", strides=2)
             dropout2 = tf.layers.dropout(inputs=pool2,
-                                         rate=0.7, training=self.training)
+                                         rate=0.3, training=self.training)
 
             # Convolutional Layer #3 and Pooling Layer #3
             conv3 = tf.layers.conv2d(inputs=dropout2, filters=128, kernel_size=[3, 3],
@@ -60,7 +60,7 @@ def _build_net(self):
             pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2],
                                             padding="SAME", strides=2)
             dropout3 = tf.layers.dropout(inputs=pool3,
-                                         rate=0.7, training=self.training)
+                                         rate=0.3, training=self.training)
 
             # Dense Layer with Relu
             flat = tf.reshape(dropout3, [-1, 128 * 4 * 4])
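
For reference, the relationship named in the commit message, as a minimal sketch assuming TensorFlow 1.x (the tensor names below are illustrative, not taken from the lab script): tf.layers.dropout's `rate` is the probability of dropping a unit, while tf.nn.dropout's `keep_prob` is the probability of keeping one, so a keep probability of 0.7 corresponds to rate=0.3 here.

import tensorflow as tf

x = tf.ones([1, 10])
training = tf.placeholder_with_default(True, shape=[])

keep_prob = 0.7  # intended behaviour: keep ~70% of activations during training

# tf.nn.dropout takes the probability of KEEPING a unit.
drop_nn = tf.nn.dropout(x, keep_prob=keep_prob)

# tf.layers.dropout takes the probability of DROPPING a unit,
# so the equivalent setting is rate = 1 - keep_prob = 0.3.
drop_layers = tf.layers.dropout(x, rate=1.0 - keep_prob, training=training)

with tf.Session() as sess:
    a, b = sess.run([drop_nn, drop_layers])
    # Both results keep roughly 70% of the entries non-zero (stochastic)
    # and scale the surviving entries by 1 / keep_prob.
    print(a)
    print(b)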