Commit: Added datasets

hunkim committed Feb 25, 2017
1 parent dc91442 commit a8187fb
Showing 13 changed files with 160 additions and 44 deletions.
20 changes: 15 additions & 5 deletions README.md
@@ -1,13 +1,23 @@
-# Lab code (WIP)
-This is work in progress. Please do not use them, since they may have many bugs and trial code. We will let you know when it's done.
+# Lab code (WIP), but call for comments
+This is code for labs covered in TensorFlow basic tutorials (in Korean) at https://www.youtube.com/watch?v=BS6O0zOGX4E&list=PLlMkM4tgfjnLSOjrEJN31gZATbcj_MpUm.
+(We also have a plan to record videos in English.)
 
-## Naming rule:
+This is work in progress, and may have bugs.
+However, we call for your comments and pull requests. Check out our style guidelines:
 
-* klab-XX-X-[name].py: Keras labs
-* lab-XX-X-[name].py: regular tensorflow labs
+* More TF (1.0) style: use more recent and decent TF APIs.
+* More Pythonic: fully leverage the power of Python.
+* Readability (over efficiency): since it's for instruction purposes, we prefer *readability* over other concerns.
+* Understandability (over everything): understanding key TF concepts is the main goal of this code.
+
+## File naming rule:
+
+* klab-XX-X-[name].py: Keras lab code
+* lab-XX-X-[name].py: TensorFlow lab code
 
 
 ## Run test and autopep8
 TODO: Need to add more test cases
 
 ```bash
 python -m unittest discover -s tests;
 ```
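The TODO above asks for more test cases. As a sketch of the kind of smoke test that could live under tests/ (the file name and the idea of shelling out to a lab script are assumptions, not existing repo code):

```python
# tests/test_smoke.py (hypothetical file): check a lab script runs cleanly.
import subprocess
import sys
import unittest


class TestLabScripts(unittest.TestCase):

    def test_lab_04_3_runs(self):
        # Run the lab as a subprocess; a non-zero exit code fails the test.
        result = subprocess.run(
            [sys.executable, 'lab-04-3-file_input_linear_regression.py'])
        self.assertEqual(result.returncode, 0)


if __name__ == '__main__':
    unittest.main()
```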
File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion klab-04-3-file_input_linear_regression.py
@@ -1,7 +1,7 @@
 from keras.models import Sequential
 from keras.layers import Dense
 import numpy as np
-xy = np.loadtxt('data.csv', delimiter=',')
+xy = np.loadtxt('data-01-linear.csv', delimiter=',')
 x_data = xy[:, 0:-1]
 y_data = xy[:, [-1]]
 
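An aside on the slicing above (not part of the diff): `xy[:, [-1]]` keeps the label column 2-D, shape (N, 1), which is what the model's output layer expects, while plain `xy[:, -1]` would flatten it to shape (N,). A quick NumPy check with made-up rows:

```python
import numpy as np

xy = np.array([[73., 80., 75., 152.],
               [93., 88., 93., 185.]])  # made-up rows: 3 features + 1 label
print(xy[:, 0:-1].shape)  # (2, 3): every column except the last
print(xy[:, [-1]].shape)  # (2, 1): last column, still a 2-D column vector
print(xy[:, -1].shape)    # (2,):   last column, flattened to 1-D
```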
2 changes: 1 addition & 1 deletion klab-04-4-stock_linear_regression.py
@@ -3,7 +3,7 @@
 import numpy as np
 from sklearn.preprocessing import MinMaxScaler
 
-xy = np.loadtxt('stock_daily.csv', delimiter=',')
+xy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')
 
 # Scaling is very important; training does not converge without it.
 scaler = MinMaxScaler(feature_range=(0, 1))
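The "does not converge" warning above comes down to feature scale: stock prices and volumes differ by orders of magnitude, so raw values blow up gradient descent. What MinMaxScaler computes per column, as a quick sketch with made-up numbers:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Made-up rows: two price-like columns and one volume-like column
xy = np.array([[828.0, 821.0, 1247100.0],
               [823.0, 818.0,  887100.0],
               [819.0, 815.0, 1198100.0]])

scaler = MinMaxScaler(feature_range=(0, 1))
# Per column: (x - column_min) / (column_max - column_min)
print(scaler.fit_transform(xy))  # every column now lies in [0, 1]
```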
2 changes: 1 addition & 1 deletion klab-12-3-rnn_prediction.py
@@ -17,7 +17,7 @@
 
 import matplotlib.pyplot as plt
 # Open,High,Low,Close,Volume
-xy = np.loadtxt('stock_daily.csv', delimiter=',')
+xy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')
 xy = xy[::-1]  # reverse row order so rows are in chronological order
 
 # Scaling is very important; training does not converge without it.
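Since this CSV stores the newest day first, `xy[::-1]` is what puts the series into the time order an RNN needs; a tiny sketch:

```python
import numpy as np

xy = np.array([[3., 30.],   # newest day (stored first in the file)
               [2., 20.],
               [1., 10.]])  # oldest day
xy = xy[::-1]  # reverse the row order only; columns are untouched
print(xy[0])   # [ 1. 10.]: the oldest day now comes first
```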
2 changes: 1 addition & 1 deletion klab-12-4-rnn_2_prediction.py
@@ -18,7 +18,7 @@
 
 import matplotlib.pyplot as plt
 # Open,High,Low,Close,Volume
-xy = np.loadtxt('stock_daily.csv', delimiter=',')
+xy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')
 xy = xy[::-1]  # reverse row order so rows are in chronological order
 
 # Scaling is very important; training does not converge without it.
2 changes: 1 addition & 1 deletion lab-04-3-file_input_linear_regression.py
@@ -2,7 +2,7 @@
 import tensorflow as tf
 import numpy as np
 
-xy = np.loadtxt('data.csv', delimiter=',', dtype=np.float32)
+xy = np.loadtxt('data-01-linear.csv', delimiter=',', dtype=np.float32)
 x_data = xy[:, 0:-1]
 y_data = xy[:, [-1]]
 
45 changes: 24 additions & 21 deletions lab-05-1-logistic_regression_classifier.py
@@ -2,9 +2,8 @@
 import tensorflow as tf
 import numpy as np
 
-xy = np.loadtxt('data.csv', delimiter=',')
-x_data = xy[:, 0:-1]
-y_data = xy[:, [-1]]
+x_data = np.array([[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]])
+y_data = np.array([[0], [0], [0], [1], [1], [1]])
 
 print(x_data.shape, x_data, len(x_data))
 print(y_data.shape, y_data)
@@ -14,28 +13,32 @@
 Y = tf.placeholder(tf.float32)
 
 W = tf.Variable(tf.random_uniform(
-    shape=[3, 1], minval=-1.0, maxval=1.0, dtype=tf.float32))
-# Hypothesis
-h = tf.matmul(X, W)
-hypothesis = tf.div(1., 1. + tf.exp(-h))
+    shape=[2, 1], minval=-1.0, maxval=1.0, dtype=tf.float32))
 
-# cost function
+# Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(tf.matmul(X, W)))
+hypothesis = tf.sigmoid(tf.matmul(X, W))
+
+# Cost function
 cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y)
                        * tf.log(1 - hypothesis))
 
 # Minimize
-optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0001)
-train = optimizer.minimize(cost)
+train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
 
-# Initialize variable
-init = tf.global_variables_initializer()
+# Accuracy computation
+predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)  # True if hypothesis > 0.5, else False
+accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
 
 # Launch graph
-sess = tf.Session()
-sess.run(init)
-
-for step in range(2001):
-    sess.run(train, feed_dict={X: x_data, Y: y_data})
-    if step % 20 == 0:
-        print(step, sess.run(cost, feed_dict={
-            X: x_data, Y: y_data}), sess.run(W))
+with tf.Session() as sess:
+    # Initialize TensorFlow variables
+    sess.run(tf.global_variables_initializer())
+
+    for step in range(2001):
+        sess.run(train, feed_dict={X: x_data, Y: y_data})
+        if step % 200 == 0:
+            print(step, sess.run(cost, feed_dict={
+                X: x_data, Y: y_data}), sess.run(W))
+
+    # Accuracy report
+    h, c, a = sess.run([hypothesis, predicted, accuracy],
+                       feed_dict={X: x_data, Y: y_data})
+    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)
1 change: 1 addition & 0 deletions lab-06-1-softmax_classifier.py
@@ -11,6 +11,7 @@
 Y = tf.placeholder("float", [None, 3])
 
 W = tf.Variable(tf.zeros([3, 3]))
+
 # Softmax
 hypothesis = tf.nn.softmax(tf.matmul(X, W))
 # tf.nn.softmax computes softmax activations
59 changes: 59 additions & 0 deletions lab-06-2-softmax_zoo_classifier.py
@@ -0,0 +1,59 @@
+# Lab 6 Softmax Classifier
+import tensorflow as tf
+import numpy as np
+
+x_data = np.array([[1, 2, 1], [1, 3, 2], [1, 3, 4], [1, 5, 5],
+                   [1, 7, 5], [1, 2, 5], [1, 6, 6], [1, 7, 7]], dtype=np.float32)
+y_data = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 1, 0],
+                   [0, 1, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]], dtype=np.float32)
+
+X = tf.placeholder("float", [None, 3])
+Y = tf.placeholder("float", [None, 3])
+
+W = tf.Variable(tf.zeros([3, 3]))
+
+# Softmax
+hypothesis = tf.nn.softmax(tf.matmul(X, W))
+# tf.nn.softmax computes softmax activations
+# softmax = exp(logits) / reduce_sum(exp(logits), dim)
+
+# Cross entropy cost
+cost = tf.reduce_mean(-tf.reduce_sum(Y *
+                                     tf.log(hypothesis), axis=1))
+
+optimizer = tf.train.GradientDescentOptimizer(
+    learning_rate=0.1).minimize(cost)
+
+init = tf.global_variables_initializer()
+
+# Launch graph
+with tf.Session() as sess:
+    sess.run(init)
+
+    for step in range(2001):
+        sess.run(optimizer, feed_dict={X: x_data, Y: y_data})
+        if step % 200 == 0:
+            print(step, sess.run(cost, feed_dict={
+                X: x_data, Y: y_data}), sess.run(W))
+
+    print('--------------')
+
+    # Testing & one-hot encoding
+    a = sess.run(hypothesis, feed_dict={X: [[1, 11, 7]]})
+    print(a, sess.run(tf.arg_max(a, 1)))
+
+    print('--------------')
+
+    b = sess.run(hypothesis, feed_dict={X: [[1, 3, 4]]})
+    print(b, sess.run(tf.arg_max(b, 1)))
+
+    print('--------------')
+
+    c = sess.run(hypothesis, feed_dict={X: [[1, 1, 0]]})
+    print(c, sess.run(tf.arg_max(c, 1)))
+
+    print('--------------')
+
+    all = sess.run(hypothesis, feed_dict={
+        X: [[1, 11, 7], [1, 3, 4], [1, 1, 0]]})
+    print(all, sess.run(tf.arg_max(all, 1)))
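The comment `softmax = exp(logits) / reduce_sum(exp(logits), dim)` is the whole trick; here is that formula as a standalone NumPy sketch, with the max-shift that stable implementations use:

```python
import numpy as np


def softmax(logits):
    # Shift each row by its max: exp() overflows for large logits, and
    # the shift cancels in the ratio, leaving the output unchanged.
    z = logits - np.max(logits, axis=1, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=1, keepdims=True)


print(softmax(np.array([[2.0, 1.0, 0.1]])))
# [[0.659... 0.242... 0.098...]] -- each row sums to 1
```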
23 changes: 10 additions & 13 deletions lab-09-1-xor.py
@@ -9,38 +9,35 @@
 X = tf.placeholder(tf.float32)
 Y = tf.placeholder(tf.float32)
 
-
 W = tf.Variable(tf.random_uniform(
     shape=[2, 1], minval=-1.0, maxval=1.0, dtype=tf.float32))
 
-# Hypothesis
-h = tf.matmul(X, W)
-hypothesis = tf.div(1., 1. + tf.exp(-h))
+# Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(tf.matmul(X, W)))
+hypothesis = tf.sigmoid(tf.matmul(X, W))
 
 # Cost function
 cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y)
                        * tf.log(1 - hypothesis))
 
 train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
 
+# Accuracy computation
+predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)  # True if hypothesis > 0.5, else False
+accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
+
 # Initialize variables
 init = tf.global_variables_initializer()
 
 # Launch graph
 with tf.Session() as sess:
     sess.run(init)
 
-    for step in range(1001):
+    for step in range(5001):
         sess.run(train, feed_dict={X: x_data, Y: y_data})
         if step % 200 == 0:
             print(step, sess.run(cost, feed_dict={
                 X: x_data, Y: y_data}), sess.run(W))
 
-    # Test model
-    correct_prediction = tf.equal(tf.floor(hypothesis + 0.5), Y)
-
-    # Accuracy
-    accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
-    print(sess.run([hypothesis, tf.floor(hypothesis + 0.5),
-                    correct_prediction, accuracy], feed_dict={X: x_data, Y: y_data}))
-    print("Accuracy: ", accuracy.eval({X: x_data, Y: y_data}))
+    # Accuracy report
+    h, c, a = sess.run([hypothesis, predicted, accuracy],
+                       feed_dict={X: x_data, Y: y_data})
+    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)
46 changes: 46 additions & 0 deletions lab-09-2-xor-nn.py
@@ -0,0 +1,46 @@
+# Lab 9 XOR
+# This example does not work
+import tensorflow as tf
+import numpy as np
+
+x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
+y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)
+
+X = tf.placeholder(tf.float32)
+Y = tf.placeholder(tf.float32)
+
+
+W = tf.Variable(tf.random_uniform(
+    shape=[2, 1], minval=-1.0, maxval=1.0, dtype=tf.float32))
+
+# Hypothesis
+h = tf.matmul(X, W)
+hypothesis = tf.div(1., 1. + tf.exp(-h))
+
+# Cost function
+cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y)
+                       * tf.log(1 - hypothesis))
+
+train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
+
+# Initialize variables
+init = tf.global_variables_initializer()
+
+# Launch graph
+with tf.Session() as sess:
+    sess.run(init)
+
+    for step in range(1001):
+        sess.run(train, feed_dict={X: x_data, Y: y_data})
+        if step % 200 == 0:
+            print(step, sess.run(cost, feed_dict={
+                X: x_data, Y: y_data}), sess.run(W))
+
+    # Test model
+    correct_prediction = tf.equal(tf.floor(hypothesis + 0.5), Y)
+
+    # Accuracy
+    accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
+    print(sess.run([hypothesis, tf.floor(hypothesis + 0.5),
+                    correct_prediction, accuracy], feed_dict={X: x_data, Y: y_data}))
+    print("Accuracy: ", accuracy.eval({X: x_data, Y: y_data}))
