Below are the solutions to these exercises on optimization algorithms in TensorFlow, implemented with the R `tensorflow` package.
####################
#                  #
#    Exercise 1    #
#                  #
####################

# caret provides createDataPartition(), used below for the train/test split.
library(caret)
## Warning: Installed Rcpp (0.12.14.5) different from Rcpp used to build dplyr (0.12.11). ## Please reinstall dplyr to avoid random crashes or undefined behavior.
# Predict engine shape (vs, 0/1) from quarter-mile time (qsec) with a
# single-feature logistic regression built directly on the TF graph API.
x <- as.matrix(mtcars$qsec)
y <- as.matrix(mtcars$vs)

# 80/20 train/test split on the predictor.
trainIndex <- createDataPartition(x, p = 0.8, list = FALSE, times = 1)
x_train <- as.matrix(x[trainIndex, ])
x_test  <- as.matrix(x[-trainIndex, ])
y_train <- as.matrix(y[trainIndex, ])
y_test  <- as.matrix(y[-trainIndex, ])

# Placeholders: any batch size, one column each.
X <- tf$placeholder(tf$float32, shape(NULL, 1L))
Y <- tf$placeholder(tf$float32, shape(NULL, 1L), name = "Y")

# Model parameters, initialised to zero.  (Fixed typo: "weghts" -> "weights".)
W <- tf$Variable(tf$zeros(shape(1L, 1L)), name = "weights")
b <- tf$Variable(tf$zeros(shape(1L)), name = "bias")

init_op <- tf$global_variables_initializer()

# Loss: mean sigmoid cross-entropy of the linear logit over the batch.
logit   <- tf$add(tf$multiply(X, W), b)
entropy <- tf$nn$sigmoid_cross_entropy_with_logits(labels = Y, logits = logit)
loss    <- tf$reduce_mean(entropy)

####################
#                  #
#    Exercise 2    #
#                  #
####################

# Plain gradient descent.
optimizer <- tf$train$GradientDescentOptimizer(learning_rate = 0.01)$minimize(loss)

with(tf$Session() %as% sess, {
  sess$run(init_op)
  for (i in seq_len(100000)) {
    sess$run(optimizer, feed_dict = dict(X = x_train, Y = y_train))
  }
  w_value <- sess$run(W)
  b_value <- sess$run(b)
})

# Plot the fitted sigmoid over the range of the held-out data.
x <- seq(min(x_test), max(x_test), 0.01)
y <- function(a) {
  1 / (1 + exp(-a * as.vector(w_value) - as.vector(b_value)))
}
plot(x_test, y_test)
lines(x, y(x))

####################
#                  #
#    Exercise 3    #
#                  #
####################

# Gradient descent with a small momentum term (0.1).
optimizer <- tf$train$MomentumOptimizer(learning_rate = 0.01, momentum = 0.1)$minimize(loss)

# tf$initialize_all_variables() is deprecated; use the same initializer
# as Exercise 1 for consistency.
init <- tf$global_variables_initializer()

with(tf$Session() %as% sess, {
  sess$run(init)
  for (i in seq_len(100000)) {
    sess$run(optimizer, feed_dict = dict(X = x_train, Y = y_train))
  }
  w_value <- sess$run(W)
  b_value <- sess$run(b)
})

# Plot the fitted sigmoid against the held-out test data.
x <- seq(min(x_test), max(x_test), 0.01)
y <- function(a) {
  1 / (1 + exp(-a * as.vector(w_value) - as.vector(b_value)))
}
plot(x_test, y_test)
lines(x, y(x))

####################
#                  #
#    Exercise 4    #
#                  #
####################

# Same as Exercise 3, but with a strong momentum term (0.9).
optimizer <- tf$train$MomentumOptimizer(learning_rate = 0.01, momentum = 0.9)$minimize(loss)

# tf$initialize_all_variables() is deprecated; use the same initializer
# as Exercise 1 for consistency.
init <- tf$global_variables_initializer()

with(tf$Session() %as% sess, {
  sess$run(init)
  for (i in seq_len(100000)) {
    sess$run(optimizer, feed_dict = dict(X = x_train, Y = y_train))
  }
  w_value <- sess$run(W)
  b_value <- sess$run(b)
})

# Plot the fitted sigmoid against the held-out test data.
x <- seq(min(x_test), max(x_test), 0.01)
y <- function(a) {
  1 / (1 + exp(-a * as.vector(w_value) - as.vector(b_value)))
}
plot(x_test, y_test)
lines(x, y(x))

####################
#                  #
#    Exercise 5    #
#                  #
####################

# Nesterov accelerated gradient: momentum with look-ahead.
# (T replaced with TRUE — T is reassignable and unsafe.)
optimizer <- tf$train$MomentumOptimizer(
  learning_rate = 0.01, momentum = 0.5, use_nesterov = TRUE
)$minimize(loss)

# tf$initialize_all_variables() is deprecated; use the same initializer
# as Exercise 1 for consistency.
init <- tf$global_variables_initializer()

with(tf$Session() %as% sess, {
  sess$run(init)
  for (i in seq_len(100000)) {
    sess$run(optimizer, feed_dict = dict(X = x_train, Y = y_train))
  }
  w_value <- sess$run(W)
  b_value <- sess$run(b)
})

# Plot the fitted sigmoid against the held-out test data.
x <- seq(min(x_test), max(x_test), 0.01)
y <- function(a) {
  1 / (1 + exp(-a * as.vector(w_value) - as.vector(b_value)))
}
plot(x_test, y_test)
lines(x, y(x))

####################
#                  #
#    Exercise 6    #
#                  #
####################

# Adam: adaptive moment estimation with the standard defaults.
optimizer <- tf$train$AdamOptimizer(
  learning_rate = 0.01, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-08
)$minimize(loss)

# tf$initialize_all_variables() is deprecated; use the same initializer
# as Exercise 1 for consistency.
init <- tf$global_variables_initializer()

with(tf$Session() %as% sess, {
  sess$run(init)
  for (i in seq_len(100000)) {
    sess$run(optimizer, feed_dict = dict(X = x_train, Y = y_train))
  }
  w_value <- sess$run(W)
  b_value <- sess$run(b)
})

# Plot the fitted sigmoid against the held-out test data.
x <- seq(min(x_test), max(x_test), 0.01)
y <- function(a) {
  1 / (1 + exp(-a * as.vector(w_value) - as.vector(b_value)))
}
plot(x_test, y_test)
lines(x, y(x))

####################
#                  #
#    Exercise 7    #
#                  #
####################

# Adadelta with a small decay rate (rho = 0.1).
optimizer <- tf$train$AdadeltaOptimizer(
  learning_rate = 0.01, rho = 0.1, epsilon = 1e-08
)$minimize(loss)

# tf$initialize_all_variables() is deprecated; use the same initializer
# as Exercise 1 for consistency.
init <- tf$global_variables_initializer()

with(tf$Session() %as% sess, {
  sess$run(init)
  for (i in seq_len(100000)) {
    sess$run(optimizer, feed_dict = dict(X = x_train, Y = y_train))
  }
  w_value <- sess$run(W)
  b_value <- sess$run(b)
})

# Plot the fitted sigmoid against the held-out test data.
x <- seq(min(x_test), max(x_test), 0.01)
y <- function(a) {
  1 / (1 + exp(-a * as.vector(w_value) - as.vector(b_value)))
}
plot(x_test, y_test)
lines(x, y(x))

####################
#                  #
#    Exercise 8    #
#                  #
####################

# Adadelta with a large decay rate (rho = 0.9); compare with Exercise 7.
optimizer <- tf$train$AdadeltaOptimizer(
  learning_rate = 0.01, rho = 0.9, epsilon = 1e-08
)$minimize(loss)

# tf$initialize_all_variables() is deprecated; use the same initializer
# as Exercise 1 for consistency.
init <- tf$global_variables_initializer()

with(tf$Session() %as% sess, {
  sess$run(init)
  for (i in seq_len(100000)) {
    sess$run(optimizer, feed_dict = dict(X = x_train, Y = y_train))
  }
  w_value <- sess$run(W)
  b_value <- sess$run(b)
})

# Plot the fitted sigmoid against the held-out test data.
x <- seq(min(x_test), max(x_test), 0.01)
y <- function(a) {
  1 / (1 + exp(-a * as.vector(w_value) - as.vector(b_value)))
}
plot(x_test, y_test)
lines(x, y(x))

####################
#                  #
#    Exercise 9    #
#                  #
####################

# RMSProp: decaying average of squared gradients plus a momentum term.
optimizer <- tf$train$RMSPropOptimizer(
  learning_rate = 0.01, decay = 0.9, momentum = 0.1, epsilon = 1e-10
)$minimize(loss)

# tf$initialize_all_variables() is deprecated; use the same initializer
# as Exercise 1 for consistency.
init <- tf$global_variables_initializer()

with(tf$Session() %as% sess, {
  sess$run(init)
  for (i in seq_len(100000)) {
    sess$run(optimizer, feed_dict = dict(X = x_train, Y = y_train))
  }
  w_value <- sess$run(W)
  b_value <- sess$run(b)
})

# Plot the fitted sigmoid against the held-out test data.
x <- seq(min(x_test), max(x_test), 0.01)
y <- function(a) {
  1 / (1 + exp(-a * as.vector(w_value) - as.vector(b_value)))
}
plot(x_test, y_test)
lines(x, y(x))

####################
#                  #
#    Exercise 10   #
#                  #
####################

# Adam with default hyperparameters, repeated for comparison with the
# other optimizers above.
optimizer <- tf$train$AdamOptimizer(
  learning_rate = 0.01, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8
)$minimize(loss)

# tf$initialize_all_variables() is deprecated; use the same initializer
# as Exercise 1 for consistency.
init <- tf$global_variables_initializer()

with(tf$Session() %as% sess, {
  sess$run(init)
  for (i in seq_len(100000)) {
    sess$run(optimizer, feed_dict = dict(X = x_train, Y = y_train))
  }
  w_value <- sess$run(W)
  b_value <- sess$run(b)
})

# Plot the fitted sigmoid against the held-out test data.
x <- seq(min(x_test), max(x_test), 0.01)
y <- function(a) {
  1 / (1 + exp(-a * as.vector(w_value) - as.vector(b_value)))
}
plot(x_test, y_test)
lines(x, y(x))

Leave a Reply