forked from hunkim/DeepLearningZeroToAll
Commit
Showing 11 changed files with 1,059 additions and 74 deletions.
One large diff is not rendered by default; one file was deleted, and one file was renamed without changes.
@@ -0,0 +1,17 @@
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import numpy as np

x_data = np.array([[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]])
y_data = np.array([[0], [0], [0], [1], [1], [1]])

# Single sigmoid unit on two inputs: logistic regression
model = Sequential()
model.add(Dense(1, input_dim=2, activation='sigmoid'))

# The learning rate belongs to the optimizer, not to compile()
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1))

model.summary()
model.fit(x_data, y_data, epochs=2000)

print("2,1", model.predict_classes(np.array([[2, 1]])))
print("6,5", model.predict_classes(np.array([[6, 5]])))
@@ -0,0 +1,45 @@
# Lab 5 Logistic Regression Classifier
import tensorflow as tf
import numpy as np

xy = np.loadtxt('data-03-diabetes.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]   # all columns except the last: 8 features
y_data = xy[:, [-1]]   # last column: binary label, kept 2-D

print(x_data.shape, y_data.shape)

X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_uniform(
    shape=[8, 1], minval=-1.0, maxval=1.0, dtype=tf.float32))

# Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(-tf.matmul(X, W)))
hypothesis = tf.sigmoid(tf.matmul(X, W))

# Cross-entropy cost function
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y)
                       * tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

# Accuracy computation
# True if hypothesis > 0.5 else False
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

# Launch graph
with tf.Session() as sess:
    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={
                  X: x_data, Y: y_data}), sess.run(W))

    # Accuracy report
    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)
@@ -0,0 +1,68 @@
# Lab 7 Learning rate and Evaluation
import tensorflow as tf
import random
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for nn layers
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.random_normal([10]))

hypothesis = tf.matmul(X, W) + b

# define cost & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})
        avg_cost += sess.run(cost,
                             feed_dict={X: batch_xs, Y: batch_ys}) / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), {X: mnist.test.images[r:r + 1]}))

plt.imshow(mnist.test.images[r:r + 1].reshape(28, 28),
           cmap='Greys', interpolation='nearest')
plt.show()
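tensorflow.examples.tutorials.mnist is deprecated and absent from later TF releases. If the import fails, the same data can be prepared through tf.keras.datasets; a sketch, assuming a TF build that includes tf.keras (the reshaping and one-hot encoding reproduce the tutorial's format):

# Alternative MNIST loading via tf.keras.datasets (assumes tf.keras is available)
import numpy as np
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Match the tutorial's format: flattened floats in [0, 1] and one-hot labels
x_train = x_train.reshape(-1, 784).astype(np.float32) / 255.0
x_test = x_test.reshape(-1, 784).astype(np.float32) / 255.0
y_train = np.eye(10, dtype=np.float32)[y_train]
y_test = np.eye(10, dtype=np.float32)[y_test]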
@@ -0,0 +1,75 @@
# Lab 7 Learning rate and Evaluation
import tensorflow as tf
import random
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for nn layers
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)

W2 = tf.Variable(tf.random_normal([256, 256]))
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)

W3 = tf.Variable(tf.random_normal([256, 10]))
b3 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b3

# define cost & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})
        avg_cost += sess.run(cost,
                             feed_dict={X: batch_xs, Y: batch_ys}) / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), {X: mnist.test.images[r:r + 1]}))

plt.imshow(mnist.test.images[r:r + 1].reshape(28, 28),
           cmap='Greys', interpolation='nearest')
plt.show()
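For comparison, the same 784-256-256-10 ReLU network sketched in tf.keras form (an illustration, not part of this commit; assumes TensorFlow 2.x):

# tf.keras sketch of the same 784-256-256-10 network (illustration only)
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(10),  # raw logits; softmax is applied inside the loss
])
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=100, epochs=15)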
@@ -0,0 +1,79 @@
# Lab 7 Learning rate and Evaluation
import tensorflow as tf
import random
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for nn layers
# http://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
W1 = tf.get_variable("W1", shape=[784, 256],
                     initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)

W2 = tf.get_variable("W2", shape=[256, 256],
                     initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)

W3 = tf.get_variable("W3", shape=[256, 10],
                     initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b3

# define cost & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})
        avg_cost += sess.run(cost,
                             feed_dict={X: batch_xs, Y: batch_ys}) / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), {X: mnist.test.images[r:r + 1]}))

plt.imshow(mnist.test.images[r:r + 1].reshape(28, 28),
           cmap='Greys', interpolation='nearest')
plt.show()
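tf.contrib.layers.xavier_initializer is unavailable in later TF releases (tf.contrib was removed in 2.x), but Xavier/Glorot uniform initialization can be written by hand from its definition, limit = sqrt(6 / (fan_in + fan_out)). A sketch for W1 against the same TF 1.x graph; xavier_init is a hypothetical helper name:

import numpy as np
import tensorflow as tf

def xavier_init(fan_in, fan_out):
    # Glorot uniform: U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out))
    limit = np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform([fan_in, fan_out], minval=-limit, maxval=limit)

W1 = tf.Variable(xavier_init(784, 256), name="W1")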