
Commit

Added TF csv reader
hunkim committed Mar 13, 2017
1 parent 53fb71f commit 52b6a36
Showing 11 changed files with 105 additions and 40 deletions.
8 changes: 0 additions & 8 deletions data-01-test-score.csv
@@ -1,11 +1,3 @@
# Test Scores for General Psychology
# http://college.cengage.com/mathematics/brase/understandable_statistics/7e/students/datasets/mlr/frames/frame.html
# The data (X1, X2, X3, X4) are for each student.
# X1 = score on exam #1
# X2 = score on exam #2
# X3 = score on exam #3
# X4 = score on final exam
# EXAM1,EXAM2,EXAM3,FINAL
73,80,75,152
93,88,93,185
89,91,90,180
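Note: the comment/header lines are dropped because the queue-based reader added in lab-04-4 feeds every line of this file through tf.decode_csv, which has no notion of comment rows. A hedged alternative sketch (not what this commit does) would keep the header and skip it at read time; the skip count assumes exactly the eight comment lines shown above:

    # Sketch: keep the CSV header and skip it when reading (assumes 8 header lines).
    reader = tf.TextLineReader(skip_header_lines=8)
    key, value = reader.read(filename_queue)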
8 changes: 5 additions & 3 deletions lab-03-X-minimizing_cost_tf_gradient.py
@@ -25,22 +25,24 @@

# Get gradients
gvs = optimizer.compute_gradients(cost)
# Optional: modify gradient if necessary
# gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
# Apply gradients
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
apply_gradients = optimizer.apply_gradients(capped_gvs)
apply_gradients = optimizer.apply_gradients(gvs)

# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())

for step in range(100):
print(step, sess.run([gradient, W, gvs, capped_gvs]))
print(step, sess.run([gradient, W, gvs]))
sess.run(apply_gradients)
# Same as sess.run(train)


'''
# Apply gradients
0 [37.333332, 5.0, [(37.333336, 5.0)]]
1 [33.848888, 4.6266665, [(33.848888, 4.6266665)]]
2 [30.689657, 4.2881775, [(30.689657, 4.2881775)]]
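Note: the change above applies the raw gradients (gvs) and leaves clipping as an optional, commented-out step. As a hedged sketch (the learning rate value is illustrative), manually computing and applying unmodified gradients has the same effect as the one-call shorthand:

    # Sketch: manual gradient application vs. the minimize() shorthand.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    gvs = optimizer.compute_gradients(cost)           # list of (gradient, variable) pairs
    apply_gradients = optimizer.apply_gradients(gvs)  # same effect as optimizer.minimize(cost)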
8 changes: 3 additions & 5 deletions lab-04-1-multi_variable_linear_regression.py
@@ -34,14 +34,12 @@
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
# Set up feed_dict variables inside the loop.
feed = {x1: x1_data, x2: x2_data, x3: x3_data, Y: y_data}

for step in range(2001):
sess.run(train, feed_dict=feed)
cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
feed_dict={x1: x1_data, x2: x2_data, x3: x3_data, Y: y_data})
if step % 10 == 0:
print(step, "Cost: ", sess.run(cost, feed_dict=feed),
"\nPrediction:\n", sess.run(hypothesis, feed_dict=feed))
print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)

'''
0 Cost: 19614.8
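Note: this change (repeated in the next two files) fetches cost, hypothesis, and train in a single sess.run, so each step executes the graph once and the printed cost and prediction come from the same update. A minimal sketch of the fetch-once pattern, using the feed dict from the file:

    # Sketch: fetch several tensors in one graph execution per step.
    cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
                                   feed_dict={x1: x1_data, x2: x2_data, x3: x3_data, Y: y_data})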
8 changes: 3 additions & 5 deletions lab-04-2-multi_variable_matmul_linear_regression.py
@@ -28,14 +28,12 @@
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
# Set up feed_dict variables inside the loop.
feed = {X: x_data, Y: y_data}

for step in range(2001):
sess.run(train, feed_dict=feed)
cost_val, hy_val, _ = sess.run(
[cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})
if step % 10 == 0:
print(step, "Cost: ", sess.run(cost, feed_dict=feed),
"\nPrediction:\n", sess.run(hypothesis, feed_dict=feed))
print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)

'''
0 Cost: 7105.46
8 changes: 3 additions & 5 deletions lab-04-3-file_input_linear_regression.py
@@ -32,14 +32,12 @@
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
# Set up feed_dict variables inside the loop.
feed = {X: x_data, Y: y_data}

for step in range(2001):
sess.run(train, feed_dict=feed)
cost_val, hy_val, _ = sess.run(
[cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})
if step % 10 == 0:
print(step, "Cost: ", sess.run(cost, feed_dict=feed),
"\nPrediction:\n", sess.run(hypothesis, feed_dict=feed))
print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)

# Ask my score
print("Your score will be ", sess.run(
71 changes: 71 additions & 0 deletions lab-04-4-tf_reader_linear_regression.py
@@ -0,0 +1,71 @@
# Lab 4 Multi-variable linear regression
# https://www.tensorflow.org/programmers_guide/reading_data

import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # for reproducibility

filename_queue = tf.train.string_input_producer(
['data-01-test-score.csv'], shuffle=False, name='filename_queue')

reader = tf.TextLineReader()
key, value = reader.read(filename_queue)

# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_defaults = [[0.], [0.], [0.], [0.]]
xy = tf.decode_csv(value, record_defaults=record_defaults)

# Collect batches of CSV rows
train_x_batch, train_y_batch = \
tf.train.batch([xy[0:-1], xy[-1:]], batch_size=10)

# Placeholders for tensors that will always be fed.
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

# Hypothesis
hypothesis = tf.matmul(X, W) + b

# Simplified cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))

# Minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)

# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())

# Start populating the filename queue.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

for step in range(2001):
x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
cost_val, hy_val, _ = sess.run(
[cost, hypothesis, train], feed_dict={X: x_batch, Y: y_batch})
if step % 10 == 0:
print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)

coord.request_stop()
coord.join(threads)

# Ask my score
print("Your score will be ",
sess.run(hypothesis, feed_dict={X: [[100, 70, 101]]}))

print("Other scores will be ",
sess.run(hypothesis, feed_dict={X: [[60, 70, 110], [90, 100, 80]]}))

'''
Your score will be [[ 181.73277283]]
Other scores will be [[ 145.86265564]
[ 187.23129272]]
'''
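Note: the new file wires up the TF 1.x queue pipeline: string_input_producer queues file names, TextLineReader reads one line per call, decode_csv parses it into columns, and tf.train.batch groups rows into mini-batches kept full by the queue-runner threads started via the Coordinator. A hedged sketch (not part of this commit) of the same pipeline reading several files with shuffled batches; the second file name is hypothetical:

    # Sketch: multiple CSV files plus shuffled batching (same imports as above).
    filename_queue = tf.train.string_input_producer(
        ['data-01-test-score.csv', 'data-01-more-scores.csv'],  # second file is hypothetical
        shuffle=True, name='filename_queue')
    key, value = tf.TextLineReader().read(filename_queue)
    xy = tf.decode_csv(value, record_defaults=[[0.], [0.], [0.], [0.]])
    train_x_batch, train_y_batch = tf.train.shuffle_batch(
        [xy[0:-1], xy[-1:]], batch_size=10, capacity=500, min_after_dequeue=100)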
10 changes: 6 additions & 4 deletions lab-07-2-learning_rate_and_evaluation.py
@@ -41,14 +41,15 @@
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)

for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
avg_cost += c / total_batch

print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Epoch:', '%04d' % (epoch + 1),
'cost =', '{:.9f}'.format(avg_cost))

print("Learning finished")

@@ -62,7 +63,8 @@
print("Prediction: ", sess.run(
tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))

plt.imshow(mnist.test.images[r:r + 1].reshape(28, 28), cmap='Greys', interpolation='nearest')
plt.imshow(mnist.test.images[r:r + 1].reshape(28,
28), cmap='Greys', interpolation='nearest')
plt.show()


4 changes: 2 additions & 2 deletions lab-09-1-xor.py
@@ -7,8 +7,8 @@
x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)

X = tf.placeholder(tf.float32,[None,2])
Y = tf.placeholder(tf.float32,[None,1])
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

W = tf.Variable(tf.random_normal([2, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
4 changes: 2 additions & 2 deletions lab-09-2-xor-nn.py
@@ -7,8 +7,8 @@
x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)

X = tf.placeholder(tf.float32,[None,2])
Y = tf.placeholder(tf.float32,[None,1])
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

W1 = tf.Variable(tf.random_normal([2, 2]), name='weight1')
b1 = tf.Variable(tf.random_normal([2]), name='bias1')
4 changes: 2 additions & 2 deletions lab-09-3-xor-nn-wide-deep.py
@@ -7,8 +7,8 @@
x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)

X = tf.placeholder(tf.float32,[None,2])
Y = tf.placeholder(tf.float32,[None,1])
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

W1 = tf.Variable(tf.random_normal([2, 10]), name='weight1')
b1 = tf.Variable(tf.random_normal([10]), name='bias1')
12 changes: 8 additions & 4 deletions lab-12-5-rnn_stock_prediction.py
@@ -7,7 +7,8 @@
def MinMaxScaler(data):
numerator = data - np.min(data, 0)
denominator = np.max(data, 0) - np.min(data, 0)
return numerator / (denominator + 1e-7) # noise term prevents the zero division
# noise term prevents the zero division
return numerator / (denominator + 1e-7)


timesteps = seq_length = 7
@@ -33,8 +34,10 @@ def MinMaxScaler(data):
# split to train and testing
train_size = int(len(dataY) * 0.7)
test_size = len(dataY) - train_size
trainX, testX = np.array(dataX[0:train_size]), np.array(dataX[train_size:len(dataX)])
trainY, testY = np.array(dataY[0:train_size]), np.array(dataY[train_size:len(dataY)])
trainX, testX = np.array(dataX[0:train_size]), np.array(
dataX[train_size:len(dataX)])
trainY, testY = np.array(dataY[0:train_size]), np.array(
dataY[train_size:len(dataY)])

# input place holders
X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
@@ -65,7 +68,8 @@ def MinMaxScaler(data):
print(i, step_loss)

testPredict = sess.run(Y_pred, feed_dict={X: testX})
print("RMSE", sess.run(rmse, feed_dict={targets: testY, predictions: testPredict}))
print("RMSE", sess.run(rmse, feed_dict={
targets: testY, predictions: testPredict}))
plt.plot(testY)
plt.plot(testPredict)
plt.xlabel("Time Period")
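Note: MinMaxScaler above rescales each column to roughly [0, 1] (the 1e-7 term guards against division by zero). A minimal usage sketch with illustrative values:

    # Sketch: column-wise min-max scaling (numbers are illustrative).
    data = np.array([[10., 200.], [20., 400.], [30., 600.]])
    print(MinMaxScaler(data))
    # -> approximately [[0.  0. ], [0.5 0.5], [1.  1. ]]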