Environment Setup
conda create -n tensorflow python=3.5
source activate tensorflow
conda install pandas matplotlib jupyter notebook scipy scikit-learn
pip install tensorflow
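A quick way to confirm the installation succeeded (the printed version depends on what pip resolved):

python -c "import tensorflow as tf; print(tf.__version__)"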
Hello World!
import tensorflow as tf

# Create TensorFlow object called tensor
hello_constant = tf.constant('Hello World!')

with tf.Session() as sess:
    # Run the tf.constant operation in the session
    output = sess.run(hello_constant)
    print(output)
Hello, Tensor World!
import tensorflow as tf

# Create TensorFlow object called hello_constant
hello_constant = tf.constant('Hello World!')

# A is a 0-dimensional int32 tensor
A = tf.constant(1234)
# B is a 1-dimensional int32 tensor
B = tf.constant([123, 456, 789])
# C is a 2-dimensional int32 tensor
C = tf.constant([[123, 456, 789], [222, 333, 444]])

with tf.Session() as sess:
    # Run the tf.constant operation in the session
    output = sess.run(hello_constant)
    print(output)
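The other constants can be inspected the same way; a short sketch that prints each tensor's static shape alongside its value (A, B, and C are the constants defined above):

with tf.Session() as sess:
    for tensor in (A, B, C):
        # get_shape() reports (), (3,), and (2, 3) respectively
        print(tensor.get_shape(), sess.run(tensor))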
Session’s feed_dict
import tensorflow as tf

x = tf.placeholder(tf.string)
y = tf.placeholder(tf.int32)
z = tf.placeholder(tf.float32)

with tf.Session() as sess:
    output = sess.run([x, y, z], feed_dict={x: 'Test String', y: 123, z: 45.67})
    print(output)
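Only the placeholders a fetched tensor actually depends on need to be fed; a minimal sketch fetching just x:

with tf.Session() as sess:
    # y and z are not needed to evaluate x, so they are not fed
    print(sess.run(x, feed_dict={x: 'Hello'}))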
TensorFlow Math
import tensorflow as tf

x = tf.constant(10)
y = tf.constant(2)
z = tf.add(5, 2)        # 7
e = tf.subtract(10, 4)  # 6
g = tf.multiply(2, 5)   # 10

# Converting types
h = tf.subtract(tf.cast(tf.constant(2.0), tf.int32), tf.constant(1))
# h = tf.subtract(tf.constant(2.0), tf.constant(1))  # Error! The data types must match.
e = tf.subtract(tf.divide(x, y), tf.cast(tf.constant(1), tf.float64))

# Print z from a session
with tf.Session() as sess:
    output = sess.run(z)
    print(output)
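Evaluating the type-conversion examples as well shows the casts at work (h and e are the tensors defined above):

with tf.Session() as sess:
    print(sess.run(h))  # 1   -> int32: 2.0 cast to 2, minus 1
    print(sess.run(e))  # 4.0 -> float64: 10 / 2 = 5.0, minus 1.0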
Linear functions in TensorFlow
# Weights and Bias in TensorFlow
x = tf.Variable(5)

# Function to initialize the state of all the Variable tensors
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)

# tf.truncated_normal()
n_features = 120
n_labels = 5
weights = tf.Variable(tf.truncated_normal((n_features, n_labels)))

# tf.zeros()
n_labels = 5
bias = tf.Variable(tf.zeros(n_labels))
The tf.global_variables_initializer() call returns an operation that initializes all TensorFlow variables in the graph. The tf.Variable class lets us change the weights and bias during training, but an initial value must be chosen. Drawing the initial weights from a normal distribution prevents any single weight from overwhelming the others; tf.truncated_normal() generates random numbers from a normal distribution whose magnitude is no more than two standard deviations from the mean. The tf.zeros() function returns a tensor of all zeros, a common initial value for the bias.
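Putting these pieces together, here is a minimal sketch of the linear function y = xW + b; the batch size and placeholder names are illustrative assumptions, not part of the lesson code:

import numpy as np
import tensorflow as tf

n_features = 120
n_labels = 5

x = tf.placeholder(tf.float32, [None, n_features])
W = tf.Variable(tf.truncated_normal((n_features, n_labels)))
b = tf.Variable(tf.zeros(n_labels))
y = tf.add(tf.matmul(x, W), b)  # xW + b

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Feed a hypothetical batch of 10 zero-vectors just to check shapes
    batch = np.zeros((10, n_features), dtype=np.float32)
    print(sess.run(y, feed_dict={x: batch}).shape)  # (10, 5)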
TensorFlow Softmax
We’re using TensorFlow to build neural networks and, appropriately, there’s a function for calculating softmax.
x = tf.nn.softmax([2.0, 1.0, 0.2])
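As with any op, the value only materializes inside a session; a quick sketch:

import tensorflow as tf

probabilities = tf.nn.softmax([2.0, 1.0, 0.2])
with tf.Session() as sess:
    # The outputs sum to 1; roughly [0.65, 0.24, 0.11]
    print(sess.run(probabilities))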
One-Hot Encoding With Scikit-Learn
import numpy as np
from sklearn import preprocessing

# Example labels
labels = np.array([1, 5, 3, 2, 1, 4, 2, 1, 3])

# Create the encoder
lb = preprocessing.LabelBinarizer()

# Here the encoder finds the classes and assigns one-hot vectors
lb.fit(labels)

# And finally, transform the labels into one-hot encoded vectors
lb.transform(labels)

>>> array([[1, 0, 0, 0, 0],
           [0, 0, 0, 0, 1],
           [0, 0, 1, 0, 0],
           [0, 1, 0, 0, 0],
           [1, 0, 0, 0, 0],
           [0, 0, 0, 1, 0],
           [0, 1, 0, 0, 0],
           [1, 0, 0, 0, 0],
           [0, 0, 1, 0, 0]])
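The fitted encoder also exposes the discovered classes and the reverse mapping; a short usage sketch:

one_hot = lb.transform(labels)
print(lb.classes_)                    # [1 2 3 4 5]
print(lb.inverse_transform(one_hot))  # [1 5 3 2 1 4 2 1 3]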
Cross Entropy in TensorFlow
import tensorflow as tf

softmax_data = [0.7, 0.2, 0.1]
one_hot_data = [1.0, 0.0, 0.0]

softmax = tf.placeholder(tf.float32)
one_hot = tf.placeholder(tf.float32)

cross_entropy = -tf.reduce_sum(tf.multiply(one_hot, tf.log(softmax)))

output = None
with tf.Session() as session:
    output = session.run(cross_entropy, feed_dict={one_hot: one_hot_data, softmax: softmax_data})
    print(output)
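For these inputs only the first term is nonzero, so the cross entropy is -1.0 * log(0.7) ≈ 0.357, which is the value the session prints.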
Epochs
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np

# from helper import batches  # Helper function created in the mini-batching section
def batches(batch_size, features, labels):
    """
    Create batches of features and labels
    :param batch_size: The batch size
    :param features: List of features
    :param labels: List of labels
    :return: Batches of (Features, Labels)
    """
    assert len(features) == len(labels)
    output_batches = []

    sample_size = len(features)
    for start_i in range(0, sample_size, batch_size):
        end_i = start_i + batch_size
        batch = [features[start_i:end_i], labels[start_i:end_i]]
        output_batches.append(batch)

    return output_batches
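# A quick sanity check of batches() on toy data (illustrative values):
# 4 samples with batch_size=3 yield one batch of 3 and one batch of 1.
#
#     example_features = [['F11', 'F12'], ['F21', 'F22'], ['F31', 'F32'], ['F41', 'F42']]
#     example_labels = [['L11'], ['L21'], ['L31'], ['L41']]
#     for batch_features, batch_labels in batches(3, example_features, example_labels):
#         print(len(batch_features), len(batch_labels))  # 3 3, then 1 1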
def print_epoch_stats(epoch_i, sess, last_features, last_labels):
    """
    Print cost and validation accuracy of an epoch
    """
    current_cost = sess.run(
        cost,
        feed_dict={features: last_features, labels: last_labels})
    valid_accuracy = sess.run(
        accuracy,
        feed_dict={features: valid_features, labels: valid_labels})
    print('Epoch: {:<4} - Cost: {:<8.3} Valid Accuracy: {:<5.3}'.format(
        epoch_i,
        current_cost,
        valid_accuracy))
n_input = 784    # MNIST data input (img shape: 28*28)
n_classes = 10   # MNIST total classes (0-9 digits)

# Import MNIST data
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)

# The features are already scaled and the data is shuffled
train_features = mnist.train.images
valid_features = mnist.validation.images
test_features = mnist.test.images

train_labels = mnist.train.labels.astype(np.float32)
valid_labels = mnist.validation.labels.astype(np.float32)
test_labels = mnist.test.labels.astype(np.float32)

# Features and Labels
features = tf.placeholder(tf.float32, [None, n_input])
labels = tf.placeholder(tf.float32, [None, n_classes])

# Weights & bias
weights = tf.Variable(tf.random_normal([n_input, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))

# Logits - xW + b
logits = tf.add(tf.matmul(features, weights), bias)

# Define loss and optimizer
learning_rate = tf.placeholder(tf.float32)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Calculate accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()

batch_size = 128
epochs = 100
learn_rate = 0.01

train_batches = batches(batch_size, train_features, train_labels)

with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch_i in range(epochs):
        # Loop over all batches
        for batch_features, batch_labels in train_batches:
            train_feed_dict = {
                features: batch_features,
                labels: batch_labels,
                learning_rate: learn_rate}
            sess.run(optimizer, feed_dict=train_feed_dict)

        # Print cost and validation accuracy of an epoch
        print_epoch_stats(epoch_i, sess, batch_features, batch_labels)

    # Calculate accuracy for test dataset
    test_accuracy = sess.run(
        accuracy,
        feed_dict={features: test_features, labels: test_labels})

print('Test Accuracy: {}'.format(test_accuracy))

'''
Output:
Test Accuracy: 0.8707000017166138
'''
TensorFlow ReLUs
import tensorflow as tf

output = None
hidden_layer_weights = [
    [0.1, 0.2, 0.4],
    [0.4, 0.6, 0.6],
    [0.5, 0.9, 0.1],
    [0.8, 0.2, 0.8]]
out_weights = [
    [0.1, 0.6],
    [0.2, 0.1],
    [0.7, 0.9]]

# Weights and biases
weights = [
    tf.Variable(hidden_layer_weights),
    tf.Variable(out_weights)]
biases = [
    tf.Variable(tf.zeros(3)),
    tf.Variable(tf.zeros(2))]

# Input
features = tf.Variable([[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0], [11.0, 12.0, 13.0, 14.0]])

# Create the model: linear -> ReLU -> linear
hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0])
hidden_layer = tf.nn.relu(hidden_layer)
logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1])

# Print session results
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(logits))
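With these inputs the printed logits are approximately [[5.11, 8.44], [0., 0.], [24.01, 38.24]]. The middle row is all zeros because its inputs are all negative, so ReLU clips that entire hidden activation to zero.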
Multilayer Perceptron
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(".", one_hot=True, reshape=False)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 128  # Decrease batch size if you don't have enough memory
display_step = 1

n_input = 784    # MNIST data input (img shape: 28*28)
n_classes = 10   # MNIST total classes (0-9 digits)
n_hidden_layer = 256  # Number of features in the hidden layer

# Store layers weight & bias
weights = {
    'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes]))
}
biases = {
    'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# tf Graph input
x = tf.placeholder("float", [None, 28, 28, 1])
y = tf.placeholder("float", [None, n_classes])
x_flat = tf.reshape(x, [-1, n_input])

# Hidden layer with ReLU activation
layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']), biases['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# Output layer with linear activation
logits = tf.matmul(layer_1, weights['out']) + biases['out']

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        # Display logs per epoch step
        if epoch % display_step == 0:
            c = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    # Decrease test_size if you don't have enough memory
    test_size = 256
    print("Accuracy:", accuracy.eval({x: mnist.test.images[:test_size], y: mnist.test.labels[:test_size]}))
Saving Variables
import tensorflow as tf

# The file path to save the data
save_file = './model.ckpt'

# Two Tensor Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))

# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()

with tf.Session() as sess:
    # Initialize all the Variables
    sess.run(tf.global_variables_initializer())

    # Show the values of weights and bias
    print('Weights:')
    print(sess.run(weights))
    print('Bias:')
    print(sess.run(bias))

    # Save the model
    saver.save(sess, save_file)
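Note that saver.save() does not write a single file literally named model.ckpt; the path acts as a prefix, and several files appear in the directory (model.ckpt.data-00000-of-00001, model.ckpt.index, model.ckpt.meta, plus a checkpoint file).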
Loading Variables
# Remove the previous weights and bias
tf.reset_default_graph()

# Two Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))

# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()

with tf.Session() as sess:
    # Load the weights and bias
    saver.restore(sess, save_file)

    # Show the values of weights and bias
    print('Weight:')
    print(sess.run(weights))
    print('Bias:')
    print(sess.run(bias))
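Because saver.restore() sets the value of every variable, no call to tf.global_variables_initializer() is needed before using the graph.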
Save a Trained Model
# Remove previous Tensors and Operations
tf.reset_default_graph()

from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

learning_rate = 0.001
n_input = 784    # MNIST data input (img shape: 28*28)
n_classes = 10   # MNIST total classes (0-9 digits)

# Import MNIST data
mnist = input_data.read_data_sets('.', one_hot=True)

# Features and Labels
features = tf.placeholder(tf.float32, [None, n_input])
labels = tf.placeholder(tf.float32, [None, n_classes])

# Weights & bias
weights = tf.Variable(tf.random_normal([n_input, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))

# Logits - xW + b
logits = tf.add(tf.matmul(features, weights), bias)

# Define loss and optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Calculate accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Let's train that model, then save the weights
import math

save_file = './train_model.ckpt'
batch_size = 128
n_epochs = 100

saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(n_epochs):
        total_batch = math.ceil(mnist.train.num_examples / batch_size)

        # Loop over all batches
        for i in range(total_batch):
            batch_features, batch_labels = mnist.train.next_batch(batch_size)
            sess.run(
                optimizer,
                feed_dict={features: batch_features, labels: batch_labels})

        # Print status for every 10 epochs
        if epoch % 10 == 0:
            valid_accuracy = sess.run(
                accuracy,
                feed_dict={
                    features: mnist.validation.images,
                    labels: mnist.validation.labels})
            print('Epoch {:<3} - Validation Accuracy: {}'.format(
                epoch,
                valid_accuracy))

    # Save the model
    saver.save(sess, save_file)
    print('Trained Model Saved.')
Load a Trained Model
saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    saver.restore(sess, save_file)

    test_accuracy = sess.run(
        accuracy,
        feed_dict={features: mnist.test.images, labels: mnist.test.labels})

print('Test Accuracy: {}'.format(test_accuracy))
Setting the name Property
import tensorflow as tf

tf.reset_default_graph()

save_file = 'model.ckpt'

# Two Tensor Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]), name='weights_0')
bias = tf.Variable(tf.truncated_normal([3]), name='bias_0')

saver = tf.train.Saver()

# Print the name of Weights and Bias
print('Save Weights: {}'.format(weights.name))
print('Save Bias: {}'.format(bias.name))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, save_file)

# Remove the previous weights and bias
tf.reset_default_graph()

# Two Variables: weights and bias, declared in the opposite order
bias = tf.Variable(tf.truncated_normal([3]), name='bias_0')
weights = tf.Variable(tf.truncated_normal([2, 3]), name='weights_0')

saver = tf.train.Saver()

# Print the name of Weights and Bias
print('Load Weights: {}'.format(weights.name))
print('Load Bias: {}'.format(bias.name))

with tf.Session() as sess:
    # Load the weights and bias - no error
    saver.restore(sess, save_file)

print('Loaded Weights and Bias successfully.')
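Without explicit names, TensorFlow names variables in the order they are created (Variable:0, Variable_1:0, ...). If the declaration order changes between saving and restoring, the stored names point at the wrong tensors and restore typically fails with a shape-mismatch error. Setting the name property gives each variable a stable identifier, which is why the swapped declaration order above restores without error.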
Dropout
import tensorflow as tf

hidden_layer_weights = [
    [0.1, 0.2, 0.4],
    [0.4, 0.6, 0.6],
    [0.5, 0.9, 0.1],
    [0.8, 0.2, 0.8]]
out_weights = [
    [0.1, 0.6],
    [0.2, 0.1],
    [0.7, 0.9]]

# Weights and biases
weights = [
    tf.Variable(hidden_layer_weights),
    tf.Variable(out_weights)]
biases = [
    tf.Variable(tf.zeros(3)),
    tf.Variable(tf.zeros(2))]

# Input
features = tf.Variable([[0.0, 2.0, 3.0, 4.0], [0.1, 0.2, 0.3, 0.4], [11.0, 12.0, 13.0, 14.0]])

# Create the model with dropout
keep_prob = tf.placeholder(tf.float32)
hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0])
hidden_layer = tf.nn.relu(hidden_layer)
hidden_layer = tf.nn.dropout(hidden_layer, keep_prob)
logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1])

# Print logits from a session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(logits, feed_dict={keep_prob: 0.5}))
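During training, tf.nn.dropout() zeroes each unit with probability 1 - keep_prob and scales the surviving units by 1 / keep_prob, so the logits printed above vary from run to run. At evaluation time dropout should be disabled by feeding keep_prob = 1.0; a short sketch reusing the graph above:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # keep_prob = 1.0 keeps every unit, giving deterministic logits
    print(sess.run(logits, feed_dict={keep_prob: 1.0}))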
Further reading: TensorFlow-Examples