import tensorflow as tf

# Create TensorFlow object called tensor
hello_constant = tf.constant('Hello World!')

with tf.Session() as sess:
    # Run the tf.constant operation in the session
    output = sess.run(hello_constant)
    print(output)
Hello, Tensor World!
import tensorflow as tf

# Create TensorFlow object called hello_constant
hello_constant = tf.constant('Hello World!')

# A is a 0-dimensional int32 tensor
A = tf.constant(1234)
# B is a 1-dimensional int32 tensor
B = tf.constant([123, 456, 789])
# C is a 2-dimensional int32 tensor
C = tf.constant([[123, 456, 789], [222, 333, 444]])

with tf.Session() as sess:
    # Run the tf.constant operation in the session
    output = sess.run(hello_constant)
    print(output)
import tensorflow as tf

x = tf.constant(10)
y = tf.constant(2)

z = tf.add(5, 2)        # 7
e = tf.subtract(10, 4)  # 6
g = tf.multiply(2, 5)   # 10

# Converting types
h = tf.subtract(tf.cast(tf.constant(2.0), tf.int32), tf.constant(1))
# h = tf.subtract(tf.constant(2.0), tf.constant(1))  # Error! The data types must match.
e = tf.subtract(tf.divide(x, y), tf.cast(tf.constant(1), tf.float64))

# Print z from a session
with tf.Session() as sess:
    output = sess.run(z)
    print(output)
Linear functions in TensorFlow
import tensorflow as tf

# Weights and Bias in TensorFlow
x = tf.Variable(5)

# Function to initialize the state of all the Variable tensors
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)

# tf.truncated_normal()
n_features = 120
n_labels = 5
weights = tf.Variable(tf.truncated_normal((n_features, n_labels)))

# tf.zeros()
n_labels = 5
bias = tf.Variable(tf.zeros(n_labels))
The tf.global_variables_initializer() call returns an operation that will initialize all TensorFlow variables from the graph.
Using the tf.Variable class allows us to change the weights and bias, but an initial value needs to be chosen.
Choosing weights from a normal distribution prevents any one weight from overwhelming the others. You'll use the tf.truncated_normal() function to generate random numbers from a normal distribution.
The tf.zeros() function returns a tensor with all zeros.
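Putting these pieces together, the linear function itself is just xW + b. A minimal sketch (the placeholder input x and the example sizes here are assumptions for illustration):

import tensorflow as tf

n_features = 120
n_labels = 5

# Weights from a truncated normal distribution, bias initialized to zero
weights = tf.Variable(tf.truncated_normal((n_features, n_labels)))
bias = tf.Variable(tf.zeros(n_labels))

# A batch of inputs (placeholder assumed for illustration)
x = tf.placeholder(tf.float32, (None, n_features))

# The linear function: xW + b
linear_output = tf.add(tf.matmul(x, weights), bias)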
TensorFlow Softmax
We’re using TensorFlow to build neural networks and, appropriately, there’s a function for calculating softmax.
x = tf.nn.softmax([2.0, 1.0, 0.2])
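tf.nn.softmax() only builds the operation; to see the resulting probabilities you still run it in a session. One way to evaluate it (a minimal sketch; the printed values are approximate):

import tensorflow as tf

logit_data = [2.0, 1.0, 0.2]
logits = tf.placeholder(tf.float32)

# Softmax squashes the logits into probabilities that sum to 1
softmax = tf.nn.softmax(logits)

with tf.Session() as sess:
    output = sess.run(softmax, feed_dict={logits: logit_data})
    print(output)  # approximately [0.652, 0.240, 0.108]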
One-Hot Encoding With Scikit-Learn
import numpy as np
from sklearn import preprocessing

# Example labels
labels = np.array([1, 5, 3, 2, 1, 4, 2, 1, 3])

# Create the encoder
lb = preprocessing.LabelBinarizer()

# Here the encoder finds the classes and assigns one-hot vectors
lb.fit(labels)

# And finally, transform the labels into one-hot encoded vectors
lb.transform(labels)
>>> array([[1, 0, 0, 0, 0],
           [0, 0, 0, 0, 1],
           [0, 0, 1, 0, 0],
           [0, 1, 0, 0, 0],
           [1, 0, 0, 0, 0],
           [0, 0, 0, 1, 0],
           [0, 1, 0, 0, 0],
           [1, 0, 0, 0, 0],
           [0, 0, 1, 0, 0]])
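If you would rather stay inside TensorFlow, tf.one_hot() does the same job for integer class indices. A minimal sketch (it assumes the labels are already 0-based indices, since unlike LabelBinarizer it does not discover the classes for you):

import tensorflow as tf

# 0-based class indices (assumed; remap your labels first if needed)
labels = [0, 4, 2, 1, 0, 3, 1, 0, 2]
one_hot = tf.one_hot(labels, depth=5)

with tf.Session() as sess:
    print(sess.run(one_hot))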
Cross Entropy in TensorFlow
import tensorflow as tf

softmax_data = [0.7, 0.2, 0.1]
one_hot_data = [1.0, 0.0, 0.0]

softmax = tf.placeholder(tf.float32)
one_hot = tf.placeholder(tf.float32)

# Cross entropy: D(S, L) = -sum(L * log(S))
cross_entropy = -tf.reduce_sum(tf.multiply(one_hot, tf.log(softmax)))

with tf.Session() as session:
    output = session.run(cross_entropy, feed_dict={one_hot: one_hot_data, softmax: softmax_data})
    print(output)
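Sanity check: with this data only the first term of the sum survives, so the result reduces to -1.0 * log(0.7), and the printed cross entropy should be roughly 0.357.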
Epochs
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np

# from helper import batches  # Helper function created in the Mini-batching section


def batches(batch_size, features, labels):
    """
    Create batches of features and labels
    :param batch_size: The batch size
    :param features: List of features
    :param labels: List of labels
    :return: Batches of (Features, Labels)
    """
    assert len(features) == len(labels)
    output_batches = []

    sample_size = len(features)
    for start_i in range(0, sample_size, batch_size):
        end_i = start_i + batch_size
        batch = [features[start_i:end_i], labels[start_i:end_i]]
        output_batches.append(batch)

    return output_batches


def print_epoch_stats(epoch_i, sess, last_features, last_labels):
    """
    Print cost and validation accuracy of an epoch
    """
    current_cost = sess.run(cost, feed_dict={features: last_features, labels: last_labels})
    valid_accuracy = sess.run(accuracy, feed_dict={features: valid_features, labels: valid_labels})
    print('Epoch: {:<4} - Cost: {:<8.3} Valid Accuracy: {:<5.3}'.format(epoch_i, current_cost, valid_accuracy))


n_input = 784    # MNIST data input (img shape: 28*28)
n_classes = 10   # MNIST total classes (0-9 digits)

# Import MNIST data
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)

# The features are already scaled and the data is shuffled
train_features = mnist.train.images
valid_features = mnist.validation.images
test_features = mnist.test.images

train_labels = mnist.train.labels.astype(np.float32)
valid_labels = mnist.validation.labels.astype(np.float32)
test_labels = mnist.test.labels.astype(np.float32)

# Features and Labels
features = tf.placeholder(tf.float32, [None, n_input])
labels = tf.placeholder(tf.float32, [None, n_classes])

# Weights & bias
weights = tf.Variable(tf.random_normal([n_input, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))

# Logits - xW + b
logits = tf.add(tf.matmul(features, weights), bias)

# Define loss and optimizer
learning_rate = tf.placeholder(tf.float32)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Calculate accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()

batch_size = 128
epochs = 100
learn_rate = 0.01

train_batches = batches(batch_size, train_features, train_labels)

with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch_i in range(epochs):
        # Loop over all batches
        for batch_features, batch_labels in train_batches:
            train_feed_dict = {
                features: batch_features,
                labels: batch_labels,
                learning_rate: learn_rate}
            sess.run(optimizer, feed_dict=train_feed_dict)

        # Print cost and validation accuracy of an epoch
        print_epoch_stats(epoch_i, sess, batch_features, batch_labels)

    # Calculate accuracy for test dataset
    test_accuracy = sess.run(accuracy, feed_dict={features: test_features, labels: test_labels})

print('Test Accuracy: {}'.format(test_accuracy))

'''
Output:
Test Accuracy: 0.8707000017166138
'''
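For reference, here is how the batches() helper above behaves on a small hand-made example (the 4-sample data is made up for illustration):

# Example: 4 samples with 4 features each, batch size 3
example_features = [
    ['F11', 'F12', 'F13', 'F14'],
    ['F21', 'F22', 'F23', 'F24'],
    ['F31', 'F32', 'F33', 'F34'],
    ['F41', 'F42', 'F43', 'F44']]
example_labels = [
    ['L11', 'L12'],
    ['L21', 'L22'],
    ['L31', 'L32'],
    ['L41', 'L42']]

# Produces two batches: the first with 3 samples, the last with the remaining 1
print(batches(3, example_features, example_labels))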
TensorFlow ReLUs
import tensorflow as tf

hidden_layer_weights = [
    [0.1, 0.2, 0.4],
    [0.4, 0.6, 0.6],
    [0.5, 0.9, 0.1],
    [0.8, 0.2, 0.8]]
out_weights = [
    [0.1, 0.6],
    [0.2, 0.1],
    [0.7, 0.9]]

# Weights and biases
weights = [
    tf.Variable(hidden_layer_weights),
    tf.Variable(out_weights)]
biases = [
    tf.Variable(tf.zeros(3)),
    tf.Variable(tf.zeros(2))]

# Input
features = tf.Variable([[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0], [11.0, 12.0, 13.0, 14.0]])

# Create the model: a hidden layer with ReLU activation, then a linear output layer
hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0])
hidden_layer = tf.nn.relu(hidden_layer)
logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1])

# Print session results
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(logits))
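For these inputs the second row of the hidden layer is entirely negative, so the ReLU zeroes it out; the printed logits come out to approximately [[5.11, 8.44], [0., 0.], [24.01, 38.24]].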
Multilayer Perceptron
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(".", one_hot=True, reshape=False)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 128  # Decrease batch size if you don't have enough memory
display_step = 1

n_input = 784         # MNIST data input (img shape: 28*28)
n_classes = 10        # MNIST total classes (0-9 digits)
n_hidden_layer = 256  # layer number of features

# Store layers weight & bias
weights = {
    'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes]))
}
biases = {
    'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# tf Graph input
x = tf.placeholder("float", [None, 28, 28, 1])
y = tf.placeholder("float", [None, n_classes])

x_flat = tf.reshape(x, [-1, n_input])

# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']), biases['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# Output layer with linear activation
logits = tf.matmul(layer_1, weights['out']) + biases['out']

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        # Display logs per epoch step
        if epoch % display_step == 0:
            c = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    # Decrease test_size if you don't have enough memory
    test_size = 256
    print("Accuracy:", accuracy.eval({x: mnist.test.images[:test_size], y: mnist.test.labels[:test_size]}))
Saving Variables
import tensorflow as tf

# The file path to save the data
save_file = './model.ckpt'

# Two Tensor Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))

# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()

with tf.Session() as sess:
    # Initialize all the Variables
    sess.run(tf.global_variables_initializer())

    # Show the values of weights and bias
    print('Weights:')
    print(sess.run(weights))
    print('Bias:')
    print(sess.run(bias))

    # Save the model
    saver.save(sess, save_file)
Loading Variables
# Remove the previous weights and bias
tf.reset_default_graph()

# Two Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))

# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()

with tf.Session() as sess:
    # Load the weights and bias
    saver.restore(sess, save_file)

    # Show the values of weights and bias
    print('Weight:')
    print(sess.run(weights))
    print('Bias:')
    print(sess.run(bias))
Save a Trained Model
# Remove previous Tensors and Operations
tf.reset_default_graph()

from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

learning_rate = 0.001
n_input = 784    # MNIST data input (img shape: 28*28)
n_classes = 10   # MNIST total classes (0-9 digits)

# Import MNIST data
mnist = input_data.read_data_sets('.', one_hot=True)

# Features and Labels
features = tf.placeholder(tf.float32, [None, n_input])
labels = tf.placeholder(tf.float32, [None, n_classes])

# Weights & bias
weights = tf.Variable(tf.random_normal([n_input, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))

# Logits - xW + b
logits = tf.add(tf.matmul(features, weights), bias)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Calculate accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Let's train that model, then save the weights
import math

save_file = './train_model.ckpt'
batch_size = 128
n_epochs = 100

saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(n_epochs):
        total_batch = math.ceil(mnist.train.num_examples / batch_size)

        # Loop over all batches
        for i in range(total_batch):
            batch_features, batch_labels = mnist.train.next_batch(batch_size)
            sess.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})

        # Print status for every 10 epochs
        if epoch % 10 == 0:
            valid_accuracy = sess.run(accuracy, feed_dict={features: mnist.validation.images, labels: mnist.validation.labels})
            print('Epoch {:<3} - Validation Accuracy: {}'.format(epoch, valid_accuracy))

    # Save the model
    saver.save(sess, save_file)
    print('Trained Model Saved.')
Load a Trained Model
saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    saver.restore(sess, save_file)

    test_accuracy = sess.run(accuracy, feed_dict={features: mnist.test.images, labels: mnist.test.labels})

print('Test Accuracy: {}'.format(test_accuracy))
Setting the name property
import tensorflow as tf

tf.reset_default_graph()

save_file = 'model.ckpt'

# Two Tensor Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]), name='weights_0')
bias = tf.Variable(tf.truncated_normal([3]), name='bias_0')

saver = tf.train.Saver()

# Print the name of Weights and Bias
print('Save Weights: {}'.format(weights.name))
print('Save Bias: {}'.format(bias.name))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, save_file)

# Remove the previous weights and bias
tf.reset_default_graph()

# Two Variables: weights and bias
bias = tf.Variable(tf.truncated_normal([3]), name='bias_0')
weights = tf.Variable(tf.truncated_normal([2, 3]), name='weights_0')

saver = tf.train.Saver()

# Print the name of Weights and Bias
print('Load Weights: {}'.format(weights.name))
print('Load Bias: {}'.format(bias.name))

with tf.Session() as sess:
    # Load the weights and bias - No Error
    saver.restore(sess, save_file)

print('Loaded Weights and Bias successfully.')
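Why this matters: if you don't set the name property, TensorFlow names Variables in creation order (Variable:0, Variable_1:0, and so on). In the second half of this snippet the bias is created before the weights, so without explicit names the saver would try to load the saved weights into the bias tensor and fail on the shape mismatch; pinning each Variable to a name makes restoring order-independent.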
Dropout
import tensorflow as tf

hidden_layer_weights = [
    [0.1, 0.2, 0.4],
    [0.4, 0.6, 0.6],
    [0.5, 0.9, 0.1],
    [0.8, 0.2, 0.8]]
out_weights = [
    [0.1, 0.6],
    [0.2, 0.1],
    [0.7, 0.9]]

# Weights and biases
weights = [
    tf.Variable(hidden_layer_weights),
    tf.Variable(out_weights)]
biases = [
    tf.Variable(tf.zeros(3)),
    tf.Variable(tf.zeros(2))]

# Input
features = tf.Variable([[0.0, 2.0, 3.0, 4.0], [0.1, 0.2, 0.3, 0.4], [11.0, 12.0, 13.0, 14.0]])

# Create the model with dropout
keep_prob = tf.placeholder(tf.float32)
hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0])
hidden_layer = tf.nn.relu(hidden_layer)
hidden_layer = tf.nn.dropout(hidden_layer, keep_prob)
logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1])

# Print logits from a session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(logits, feed_dict={keep_prob: 0.5}))
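A keep_prob of 0.5 is only appropriate while training; at evaluation time you keep every unit. Since tf.nn.dropout() scales the kept units by 1 / keep_prob, feeding 1.0 makes the layer a no-op. A minimal sketch of the inference-time call (reusing the session above):

# During validation or testing, keep all units active
print(sess.run(logits, feed_dict={keep_prob: 1.0}))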