Exercise answer and style changes (hunkim#163)
* Small typos corrected.

* Exercise answer

* Remove useless variable

* Change some comments, change some style
Jeff-HOU authored and hunkim committed May 13, 2017
1 parent e3f39aa commit c2d19cb
Showing 12 changed files with 109 additions and 13 deletions.
5 changes: 3 additions & 2 deletions lab-09-1-xor.py
@@ -1,8 +1,9 @@
# Lab 9 XOR
# This example does not work: a single logistic layer cannot separate XOR
import tensorflow as tf
import numpy as np

tf.set_random_seed(777) # for reproducibility
learning_rate = 0.1

x_data = [[0, 0],
          [0, 1],
@@ -28,7 +29,7 @@
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Accuracy computation
# True if hypothesis>0.5 else False
86 changes: 86 additions & 0 deletions lab-09-2-xor-nn-back_prop.py
@@ -0,0 +1,86 @@
# Lab 9 XOR-back_prop
import tensorflow as tf
import numpy as np

tf.set_random_seed(777) # for reproducibility
learning_rate = 0.1

x_data = [[0, 0],
          [0, 1],
          [1, 0],
          [1, 1]]
y_data = [[0],
          [1],
          [1],
          [0]]
x_data = np.array(x_data, dtype=np.float32)
y_data = np.array(y_data, dtype=np.float32)

X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

W1 = tf.Variable(tf.random_normal([2, 2]), name='weight1')
b1 = tf.Variable(tf.random_normal([2]), name='bias1')
layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)

W2 = tf.Variable(tf.random_normal([2, 1]), name='weight2')
b2 = tf.Variable(tf.random_normal([1]), name='bias2')
hypothesis = tf.sigmoid(tf.matmul(layer1, W2) + b2)

# cost/loss function
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

def sigmoidGradient(z):
    return tf.multiply(tf.sigmoid(z), (1 - tf.sigmoid(z)))

diff = hypothesis - Y

d_l2 = tf.multiply(diff, sigmoidGradient(tf.matmul(layer1, W2) + b2))
d_b2 = d_l2
d_W2 = tf.matmul(tf.transpose(layer1), d_l2)

d_l1 = tf.multiply(tf.matmul(d_l2, tf.transpose(W2)), sigmoidGradient(tf.matmul(X, W1) + b1))
d_b1 = d_l1
d_W1 = tf.matmul(tf.transpose(X), d_l1)

step = [
    tf.assign(W2, W2 - learning_rate * d_W2),
    tf.assign(b2, b2 - learning_rate * tf.reduce_mean(d_b2, axis=[0])),
    tf.assign(W1, W1 - learning_rate * d_W1),
    tf.assign(b1, b1 - learning_rate * tf.reduce_mean(d_b1, axis=[0]))
]

# Accuracy computation
# True if hypothesis > 0.5 else False
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

# Launch graph
with tf.Session() as sess:
    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())

    for i in range(10001):
        sess.run([step, cost], feed_dict={X: x_data, Y: y_data})
        if i % 1000 == 0:
            print(i, sess.run(cost, feed_dict={
                X: x_data, Y: y_data}), sess.run([W1, W2]))

    # Accuracy report
    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)


'''
Hypothesis: [[ 0.07884014]
[ 0.88706875]
[ 0.94088489]
[ 0.04933683]]
Correct: [[ 0.]
[ 1.]
[ 1.]
[ 0.]]
Accuracy: 1.0
'''
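
A note on the hand-rolled backprop in lab-09-2-xor-nn-back_prop.py above: the deltas follow the chain rule, d_l2 = (hypothesis - Y) * sigmoid'(z2), d_W2 = layer1^T · d_l2, d_l1 = (d_l2 · W2^T) * sigmoid'(z1), d_W1 = X^T · d_l1. Because of the extra sigmoid-gradient factor at the output, these weight gradients match the derivative of a summed squared-error objective rather than the cross-entropy cost defined earlier in the file. The lines below are a minimal cross-check sketch against TensorFlow's autodiff; they are not part of the commit and assume the tensors from the listing above are already built.

# Sketch only, not part of this commit: compare the manual gradients above
# with TensorFlow's autodiff. Assumes X, Y, x_data, y_data, hypothesis,
# W1, W2, d_W1 and d_W2 from lab-09-2-xor-nn-back_prop.py are defined.
squared_error = tf.reduce_sum(tf.square(hypothesis - Y)) / 2  # objective the deltas correspond to
auto_d_W1, auto_d_W2 = tf.gradients(squared_error, [W1, W2])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    manual_vals, auto_vals = sess.run([[d_W1, d_W2], [auto_d_W1, auto_d_W2]],
                                      feed_dict={X: x_data, Y: y_data})
    for name, m, a in zip(["d_W1", "d_W2"], manual_vals, auto_vals):
        # The two gradients should agree up to floating-point rounding.
        print(name, "max abs diff:", np.max(np.abs(m - a)))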
4 changes: 3 additions & 1 deletion lab-09-2-xor-nn.py
@@ -1,7 +1,9 @@
# Lab 9 XOR
import tensorflow as tf
import numpy as np

tf.set_random_seed(777) # for reproducibility
learning_rate = 0.1

x_data = [[0, 0],
          [0, 1],
@@ -29,7 +31,7 @@
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Accuracy computation
# True if hypothesis>0.5 else False
4 changes: 3 additions & 1 deletion lab-09-3-xor-nn-wide-deep.py
@@ -1,7 +1,9 @@
# Lab 9 XOR
import tensorflow as tf
import numpy as np

tf.set_random_seed(777) # for reproducibility
learning_rate = 0.1

x_data = [[0, 0],
          [0, 1],
@@ -37,7 +39,7 @@
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Accuracy computation
# True if hypothesis>0.5 else False
4 changes: 3 additions & 1 deletion lab-09-4-xor_tensorboard.py
@@ -1,7 +1,9 @@
# Lab 9 XOR
import tensorflow as tf
import numpy as np

tf.set_random_seed(777) # for reproducibility
learning_rate = 0.01

x_data = [[0, 0],
          [0, 1],
@@ -43,7 +45,7 @@
cost_summ = tf.summary.scalar("cost", cost)

with tf.name_scope("train") as scope:
    train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
    train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Accuracy computation
# True if hypothesis>0.5 else False
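
For readers following lab-09-4-xor_tensorboard.py, the hunk above only shows the cost scalar summary and the train name scope. Below is a minimal TF 1.x sketch of how such summaries are usually merged and written to disk; the log directory "./logs/xor" and the loop shown here are illustrative assumptions, not lines from this file.

# Sketch only: typical summary plumbing for a graph containing
# tf.summary.scalar ops such as cost_summ above. Assumes X, Y, x_data,
# y_data and train from the file.
merged_summary = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("./logs/xor")  # assumed log directory
    writer.add_graph(sess.graph)

    for step in range(10001):
        summary, _ = sess.run([merged_summary, train],
                              feed_dict={X: x_data, Y: y_data})
        writer.add_summary(summary, global_step=step)

# Then point TensorBoard at the same directory:
#   tensorboard --logdir=./logs/xor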
3 changes: 2 additions & 1 deletion lab-12-1-hello-rnn.py
@@ -20,6 +20,7 @@
hidden_size = 5 # output from the LSTM. 5 to directly predict one-hot
batch_size = 1 # one sentence
sequence_length = 6 # |ihello| == 6
learning_rate = 0.1

X = tf.placeholder(
    tf.float32, [None, sequence_length, input_dim])  # X one-hot
@@ -45,7 +46,7 @@
sequence_loss = tf.contrib.seq2seq.sequence_loss(
    logits=outputs, targets=Y, weights=weights)
loss = tf.reduce_mean(sequence_loss)
train = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

prediction = tf.argmax(outputs, axis=2)

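
As a usage note for the prediction op above: in the full lab the predicted indices are decoded back into characters. A short sketch of that decoding follows, assuming a running session sess plus the idx2char lookup list (e.g. ['i', 'h', 'e', 'l', 'o']), x_one_hot and y_data defined earlier in the file, none of which appear in this hunk.

# Sketch only: turn the argmax indices back into the predicted string.
# idx2char, x_one_hot, y_data and sess are assumed from lab-12-1-hello-rnn.py.
result = sess.run(prediction, feed_dict={X: x_one_hot, Y: y_data})
result_str = [idx2char[c] for c in result[0]]  # batch size is 1
print("Prediction str: ", ''.join(result_str))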
3 changes: 2 additions & 1 deletion lab-12-2-char-seq-rnn.py
@@ -13,6 +13,7 @@
num_classes = len(char2idx) # final output size (RNN or softmax, etc.)
batch_size = 1 # one sample data, one batch
sequence_length = len(sample) - 1  # number of LSTM unrollings
learning_rate = 0.1

sample_idx = [char2idx[c] for c in sample] # char to index
x_data = [sample_idx[:-1]] # X data sample (0 ~ n-1) hello: hell
@@ -39,7 +40,7 @@
sequence_loss = tf.contrib.seq2seq.sequence_loss(
    logits=outputs, targets=Y, weights=weights)
loss = tf.reduce_mean(sequence_loss)
train = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

prediction = tf.argmax(outputs, axis=2)

3 changes: 2 additions & 1 deletion lab-12-3-char-seq-softmax-only.py
@@ -13,6 +13,7 @@
num_classes = len(char2idx) # final output size (RNN or softmax, etc.)
batch_size = 1 # one sample data, one batch
sequence_length = len(sample) - 1  # number of LSTM unrollings
learning_rate = 0.1

sample_idx = [char2idx[c] for c in sample] # char to index
x_data = [sample_idx[:-1]] # X data sample (0 ~ n-1) hello: hell
@@ -38,7 +39,7 @@
sequence_loss = tf.contrib.seq2seq.sequence_loss(
    logits=outputs, targets=Y, weights=weights)
loss = tf.reduce_mean(sequence_loss) # mean all sequence loss
train = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

prediction = tf.argmax(outputs, axis=2)

3 changes: 2 additions & 1 deletion lab-12-4-rnn_long_char.py
@@ -16,6 +16,7 @@
hidden_size = len(char_set)
num_classes = len(char_set)
sequence_length = 10 # Any arbitrary number
learning_rate = 0.1

dataX = []
dataY = []
@@ -59,7 +60,7 @@
sequence_loss = tf.contrib.seq2seq.sequence_loss(
    logits=outputs, targets=Y, weights=weights)
mean_loss = tf.reduce_mean(sequence_loss)
train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(mean_loss)
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(mean_loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
2 changes: 1 addition & 1 deletion lab-12-5-rnn_stock_prediction.py
@@ -42,7 +42,7 @@ def MinMaxScaler(data):


# train Parameters
timesteps = seq_length = 7
seq_length = 7
data_dim = 5
hidden_dim = 10
output_dim = 1
2 changes: 1 addition & 1 deletion lab-13-1-mnist_using_scope.py
@@ -1,4 +1,4 @@
# Lab 7 Learning rate and Evaluation
# Lab 13 Using Scope
import tensorflow as tf
import random

3 changes: 1 addition & 2 deletions lab-13-2-mnist_tensorboard.py
@@ -1,4 +1,4 @@
# Lab 7 Learning rate and Evaluation
# Lab 13 Tensorboard
import tensorflow as tf
import random

@@ -17,7 +17,6 @@

TB_SUMMARY_DIR = './tb/mnist'


# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
