Commit

Changed file names
hunkim committed Apr 3, 2017
1 parent 7aa833c commit cd57860
Showing 4 changed files with 339 additions and 0 deletions.
81 changes: 81 additions & 0 deletions lab-09-4-xor_tensorboard.py
@@ -0,0 +1,81 @@
# Lab 9 XOR
# This example does not work
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # for reproducibility

x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)

X = tf.placeholder(tf.float32, [None, 2], name='x-input')
Y = tf.placeholder(tf.float32, [None, 1], name='y-input')

with tf.name_scope("layer1") as scope:
    W1 = tf.Variable(tf.random_normal([2, 2]), name='weight1')
    b1 = tf.Variable(tf.random_normal([2]), name='bias1')
    layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)

    w1_hist = tf.summary.histogram("weights1", W1)
    b1_hist = tf.summary.histogram("biases1", b1)
    layer1_hist = tf.summary.histogram("layer1", layer1)


with tf.name_scope("layer2") as scope:
    W2 = tf.Variable(tf.random_normal([2, 1]), name='weight2')
    b2 = tf.Variable(tf.random_normal([1]), name='bias2')
    hypothesis = tf.sigmoid(tf.matmul(layer1, W2) + b2)

    w2_hist = tf.summary.histogram("weights2", W2)
    b2_hist = tf.summary.histogram("biases2", b2)
    hypothesis_hist = tf.summary.histogram("hypothesis", hypothesis)

# cost/loss function
with tf.name_scope("cost") as scope:
    cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                           tf.log(1 - hypothesis))
    cost_summ = tf.summary.scalar("cost", cost)

with tf.name_scope("train") as scope:
    train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

# Accuracy computation
# True if hypothesis>0.5 else False
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
accuracy_summ = tf.summary.scalar("accuracy", accuracy)

# Launch graph
with tf.Session() as sess:
    # tensorboard --logdir=./logs/xor_logs_r0_01
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter("./logs/xor_logs_r0_01")
    writer.add_graph(sess.graph)  # Show the graph

    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        summary, _ = sess.run([merged_summary, train], feed_dict={X: x_data, Y: y_data})
        writer.add_summary(summary, global_step=step)

        if step % 100 == 0:
            print(step, sess.run(cost, feed_dict={
                X: x_data, Y: y_data}), sess.run([W1, W2]))

    # Accuracy report
    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)


'''
Hypothesis: [[ 0.01338218]
[ 0.98166394]
[ 0.98809403]
[ 0.01135799]]
Correct: [[ 0.]
[ 1.]
[ 1.]
[ 0.]]
Accuracy: 1.0
'''
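The FileWriter above logs a single run under ./logs/xor_logs_r0_01. To compare several runs (for example, different learning rates) side by side in TensorBoard, the usual pattern is to give each run its own subdirectory under a common log root and point TensorBoard at that root. A minimal, self-contained sketch of that pattern follows; the toy graph and directory names are illustrative, not part of the commit.

import tensorflow as tf

for lr in [0.01, 0.1]:                      # two illustrative learning rates
    tf.reset_default_graph()

    # A toy scalar model, just to have something to log
    w = tf.Variable(5.0)
    loss = tf.square(w * 1.0 - 2.0)
    train = tf.train.GradientDescentOptimizer(lr).minimize(loss)
    loss_summ = tf.summary.scalar("loss", loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter("./logs/toy_lr_{}".format(lr), sess.graph)
        for step in range(100):
            s, _ = sess.run([loss_summ, train])
            writer.add_summary(s, step)
        writer.close()

# Then run: tensorboard --logdir=./logs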
52 changes: 52 additions & 0 deletions lab-09-5-linear_back_prop.py
@@ -0,0 +1,52 @@
# http://blog.aloni.org/posts/backprop-with-tensorflow/
# https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b#.b3rvzhx89
# WIP
import tensorflow as tf

tf.set_random_seed(777) # reproducibility

# tf Graph Input
x_data = [[1.], [2.], [3.]]
y_data = [[1.], [2.], [3.]]


# placeholders for tensors that will always be fed.
X = tf.placeholder(tf.float32, shape=[None, 1])
Y = tf.placeholder(tf.float32, shape=[None, 1])

# Set wrong model weights
W = tf.Variable(tf.truncated_normal([1, 1]))
b = tf.Variable(5.)

# Forward prop
hypothesis = tf.matmul(X, W) + b

# diff
assert hypothesis.shape.as_list() == Y.shape.as_list()
diff = (hypothesis - Y)

# Back prop (chain rule)
d_l1 = diff
d_b = d_l1
d_w = tf.matmul(tf.transpose(X), d_l1)

print(X, W, d_l1, d_w)

# Updating network using gradients
learning_rate = 0.1
step = [
tf.assign(W, W - learning_rate * d_w),
tf.assign(b, b - learning_rate * tf.reduce_mean(d_b)),
]

# Running and testing the training process
# (despite the name, this is the mean squared error: no square root is taken)
RMSE = tf.reduce_mean(tf.square((Y - hypothesis)))

sess = tf.InteractiveSession()
init = tf.global_variables_initializer()
sess.run(init)

for i in range(1000):
    print(i, sess.run([step, RMSE], feed_dict={X: x_data, Y: y_data}))

print(sess.run(hypothesis, feed_dict={X: x_data}))
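For reference (my reading, not stated in the commit): the hand-coded gradients above are the derivatives of a summed squared-error loss. Writing diff = XW + b - y,

    L = \frac{1}{2} \sum_i (x_i W + b - y_i)^2
    \frac{\partial L}{\partial W} = X^\top (XW + b - y) = X^\top \, \mathrm{diff}
    \frac{\partial L}{\partial b} = \sum_i (x_i W + b - y_i)

The bias update uses tf.reduce_mean(d_b) instead of the sum, which only rescales the effective learning rate for b.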
54 changes: 54 additions & 0 deletions lab-09-6-multi-linear_back_prop.py
@@ -0,0 +1,54 @@
# http://blog.aloni.org/posts/backprop-with-tensorflow/
# https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b#.b3rvzhx89
# WIP
import tensorflow as tf

tf.set_random_seed(777) # reproducibility

# tf Graph Input
x_data = [[73., 80., 75.], [93., 88., 93.],
          [89., 91., 90.], [96., 98., 100.], [73., 66., 70.]]
y_data = [[152.], [185.], [180.], [196.], [142.]]

# placeholders for tensors that will always be fed.
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])

# Set wrong model weights
W = tf.Variable(tf.truncated_normal([3, 1]))
b = tf.Variable(5.)

# Forward prop
hypothesis = tf.matmul(X, W) + b

print(hypothesis.shape, Y.shape)

# diff
assert hypothesis.shape.as_list() == Y.shape.as_list()
diff = (hypothesis - Y)

# Back prop (chain rule)
d_l1 = diff
d_b = d_l1
d_w = tf.matmul(tf.transpose(X), d_l1)

print(X, d_l1, d_w)

# Updating network using gradients
learning_rate = 1e-6
step = [
tf.assign(W, W - learning_rate * d_w),
tf.assign(b, b - learning_rate * tf.reduce_mean(d_b)),
]

# Running and testing the training process
# (despite the name, this is the mean squared error: no square root is taken)
RMSE = tf.reduce_mean(tf.square((Y - hypothesis)))

sess = tf.InteractiveSession()
init = tf.global_variables_initializer()
sess.run(init)

for i in range(10000):
    print(i, sess.run([step, RMSE], feed_dict={X: x_data, Y: y_data}))

print(sess.run(hypothesis, feed_dict={X: x_data}))
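As a sanity check on the hand-derived weight gradient, the self-contained sketch below (my addition, not part of the commit; the data and variable names are illustrative) compares the manual X^T * diff against tf.gradients of the assumed loss 0.5 * sum((XW + b - Y)^2). Only the weight gradient is compared, because the script above averages d_b rather than summing it.

import numpy as np
import tensorflow as tf

x_chk = np.array([[73., 80., 75.], [93., 88., 93.], [89., 91., 90.]], dtype=np.float32)
y_chk = np.array([[152.], [185.], [180.]], dtype=np.float32)

X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.truncated_normal([3, 1]))
b = tf.Variable(5.)

hypothesis = tf.matmul(X, W) + b
manual_d_w = tf.matmul(tf.transpose(X), hypothesis - Y)     # hand-derived gradient
loss = 0.5 * tf.reduce_sum(tf.square(hypothesis - Y))       # assumed loss
auto_d_w = tf.gradients(loss, W)[0]                         # TensorFlow's gradient

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    manual, auto = sess.run([manual_d_w, auto_d_w],
                            feed_dict={X: x_chk, Y: y_chk})
    print(np.allclose(manual, auto))  # expected: True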
152 changes: 152 additions & 0 deletions lab-09-7-sigmoid_back_prop.py
@@ -0,0 +1,152 @@
"""
In this file, we will implement back propagation by hand.
We will use the sigmoid cross-entropy loss function,
which is equivalent to tf.nn.sigmoid_cross_entropy_with_logits.

[References]

1) TensorFlow documentation (tf.nn.sigmoid_cross_entropy_with_logits)
   https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits

2) Neural Net Backprop in one slide! by Sung Kim
   https://docs.google.com/presentation/d/1_ZmtfEjLmhbuM_PqbDYMXXLAqeWN0HwuhcSKnUQZ6MM/edit#slide=id.g1ec1d04b5a_1_83

3) Back Propagation with Tensorflow by Dan Aloni
   http://blog.aloni.org/posts/backprop-with-tensorflow/

4) Yes you should understand backprop by Andrej Karpathy
   https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b#.cockptkn7

[Network Architecture]

Input: x
Layer1: x * W + b
Output layer = σ(Layer1)

Loss_i = - y * log(σ(Layer1)) - (1 - y) * log(1 - σ(Layer1))
Loss = tf.reduce_sum(Loss_i)

We want to compute
dLoss/dW = ???
dLoss/db = ???

Please read "Neural Net Backprop in one slide!" for the derivation of these formulas.
"""
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # for reproducibility

# Predicting animal type based on various features
xy = np.loadtxt('data-04-zoo.csv', delimiter=',', dtype=np.float32)
X_data = xy[:, 0:-1]
N = X_data.shape[0]
y_data = xy[:, [-1]]

# y_data has labels from 0 ~ 6
print("y has one of the following values")
print(np.unique(y_data))

# X_data.shape = (101, 16) => 101 samples, 16 features
# y_data.shape = (101, 1) => 101 samples, 1 label
print("Shape of X data: ", X_data.shape)
print("Shape of y data: ", y_data.shape)

nb_classes = 7 # 0 ~ 6

X = tf.placeholder(tf.float32, [None, 16])
y = tf.placeholder(tf.int32, [None, 1]) # 0 ~ 6

target = tf.one_hot(y, nb_classes) # one hot
target = tf.reshape(target, [-1, nb_classes])
target = tf.cast(target, tf.float32)

W = tf.Variable(tf.random_normal([16, nb_classes]), name='weight')
b = tf.Variable(tf.random_normal([nb_classes]), name='bias')


def sigma(x):
    # sigmoid function
    # σ(x) = 1 / (1 + exp(-x))
    return 1. / (1. + tf.exp(-x))


def sigma_prime(x):
    # derivative of the sigmoid function
    # σ'(x) = σ(x) * (1 - σ(x))
    return sigma(x) * (1. - sigma(x))


# Forward propagation
layer_1 = tf.matmul(X, W) + b
y_pred = sigma(layer_1)

# Loss function (end of forward propagation)
loss_i = - target * tf.log(y_pred) - (1. - target) * tf.log(1. - y_pred)
loss = tf.reduce_sum(loss_i)

# Dimension Check
assert y_pred.shape.as_list() == target.shape.as_list()


# Back prop (chain rule)
# How to derive? please read "Neural Net Backprop in one slide!"
d_loss = (y_pred - target) / (y_pred * (1. - y_pred) + 1e-7)
d_sigma = sigma_prime(layer_1)
d_layer = d_loss * d_sigma
d_b = d_layer
d_W = tf.matmul(tf.transpose(X), d_layer)

# Updating network using gradients
learning_rate = 0.01
train_step = [
tf.assign(W, W - learning_rate * d_W),
tf.assign(b, b - learning_rate * tf.reduce_sum(d_b)),
]

# Prediction and Accuracy
prediction = tf.argmax(y_pred, 1)
acct_mat = tf.equal(tf.argmax(y_pred, 1), tf.argmax(target, 1))
acct_res = tf.reduce_mean(tf.cast(acct_mat, tf.float32))

# Launch graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(500):
        sess.run(train_step, feed_dict={X: X_data, y: y_data})

        if step % 10 == 0:
            # Within 300 steps, you should see an accuracy of 100%
            step_loss, acc = sess.run([loss, acct_res], feed_dict={
                X: X_data, y: y_data})
            print("Step: {:5}\t Loss: {:10.5f}\t Acc: {:.2%}".format(
                step, step_loss, acc))

    # Let's see if we can predict
    pred = sess.run(prediction, feed_dict={X: X_data})
    for p, y in zip(pred, y_data):
        msg = "[{}]\t Prediction: {:d}\t True y: {:d}"
        print(msg.format(p == int(y[0]), p, int(y[0])))

"""
Output Example
Step: 0 Loss: 453.74799 Acc: 38.61%
Step: 20 Loss: 95.05664 Acc: 88.12%
Step: 40 Loss: 66.43570 Acc: 93.07%
Step: 60 Loss: 53.09288 Acc: 94.06%
...
Step: 290 Loss: 18.72972 Acc: 100.00%
Step: 300 Loss: 18.24953 Acc: 100.00%
Step: 310 Loss: 17.79592 Acc: 100.00%
...
[True] Prediction: 0 True y: 0
[True] Prediction: 0 True y: 0
[True] Prediction: 3 True y: 3
[True] Prediction: 0 True y: 0
...
"""

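For completeness, here is the simplification that the d_loss * d_sigma product above relies on (my derivation, following the "Neural Net Backprop in one slide!" reference): with z = XW + b, p = σ(z), and one-hot target t,

    \frac{\partial \mathrm{Loss}}{\partial z}
        = \frac{p - t}{p\,(1 - p)} \cdot \sigma'(z)
        = \frac{p - t}{p\,(1 - p)} \cdot p\,(1 - p)
        = p - t
    \frac{\partial \mathrm{Loss}}{\partial W} = X^\top (p - t)
    \frac{\partial \mathrm{Loss}}{\partial b} = \sum_i (p_i - t_i)

so d_layer in the code is, up to the 1e-7 term added for numerical stability, simply y_pred - target; this is also the gradient of tf.nn.sigmoid_cross_entropy_with_logits with respect to its logits.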