Below are the solutions to these exercises on neural networks.
####################
#                  #
#    Exercise 1    #
#                  #
####################

library(MASS)

data <- biopsy

# Min-max scaling to the [0, 1] range
scale.0.1 <- function(x) {
  (x - min(x, na.rm = TRUE)) / (max(x, na.rm = TRUE) - min(x, na.rm = TRUE))
}

norm.data <- data.frame(lapply(data[, 2:10], function(x) scale.0.1(x)))
norm.data$class <- data$class
str(norm.data)
## 'data.frame':    699 obs. of  10 variables:
##  $ V1   : num  0.444 0.444 0.222 0.556 0.333 ...
##  $ V2   : num  0 0.333 0 0.778 0 ...
##  $ V3   : num  0 0.333 0 0.778 0 ...
##  $ V4   : num  0 0.444 0 0 0.222 ...
##  $ V5   : num  0.111 0.667 0.111 0.222 0.111 ...
##  $ V6   : num  0 1 0.111 0.333 0 ...
##  $ V7   : num  0.222 0.222 0.222 0.222 0.222 ...
##  $ V8   : num  0 0.111 0 0.667 0 ...
##  $ V9   : num  0 0 0 0 0 ...
##  $ class: Factor w/ 2 levels "benign","malignant": 1 1 1 1 1 2 1 1 1 1 ...
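As a quick sanity check (a minimal sketch, not part of the original exercise), every scaled predictor should span exactly [0, 1]:

```r
# Each scaled column should run from 0 to 1 (NAs are still present at this point)
sapply(norm.data[, 1:9], range, na.rm = TRUE)
```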
sum(is.na(norm.data))
## [1] 16
norm.data <- norm.data[complete.cases(norm.data), ]  # 683 complete rows remain

set.seed(42)
index <- sample(1:nrow(norm.data), round(0.75 * nrow(norm.data)), replace = FALSE)
train <- norm.data[index, ]   # 512 rows
test  <- norm.data[-index, ]  # 171 rows

####################
#                  #
#    Exercise 2    #
#                  #
####################

library(nnet)

# Fit single-hidden-layer networks of increasing size and record test accuracy
cross.val.nnet <- function(train, test, low_range, high_range) {
  acc <- NULL
  for (h in low_range:high_range) {
    temp.nn <- nnet(class ~ ., size = h, data = train)
    pred <- predict(temp.nn, test, type = "class")
    Table <- table(test$class, pred)
    accuracy <- sum(diag(Table)) / sum(Table)
    acc <- c(acc, accuracy)
  }
  return(acc)
}

####################
#                  #
#    Exercise 3    #
#                  #
####################

set.seed(42)
cross.val.nnet(train, test, 1, 9)
## # weights:  12
## initial  value 364.417657
## iter  10 value 44.380633
## iter  20 value 33.313356
## iter  30 value 32.921823
## iter  40 value 32.751413
## iter  50 value 32.458291
## iter  60 value 32.429321
## iter  70 value 32.418764
## iter  80 value 32.331775
## iter  90 value 32.331194
## iter 100 value 32.308230
## final  value 32.308230
## stopped after 100 iterations
## # weights:  23
## initial  value 336.035945
## iter  10 value 50.031115
## iter  20 value 44.900883
## iter  30 value 42.117609
## iter  40 value 42.032382
## iter  50 value 42.031243
## iter  60 value 42.016893
## final  value 42.016821
## converged
## # weights:  34
## initial  value 336.180770
## iter  10 value 30.685458
## iter  20 value 25.271087
## iter  30 value 22.064008
## iter  40 value 19.725271
## iter  50 value 19.632863
## iter  60 value 19.619118
## iter  70 value 19.617317
## final  value 19.617252
## converged
## # weights:  45
## initial  value 343.727637
## iter  10 value 31.305943
## iter  20 value 19.634686
## iter  30 value 9.829079
## iter  40 value 7.646967
## iter  50 value 6.991482
## iter  60 value 6.770721
## iter  70 value 6.744635
## iter  80 value 6.734181
## iter  90 value 6.731891
## iter 100 value 6.731581
## final  value 6.731581
## stopped after 100 iterations
## # weights:  56
## initial  value 333.483628
## iter  10 value 38.324417
## iter  20 value 29.270707
## iter  30 value 25.440351
## iter  40 value 24.094437
## iter  50 value 23.641925
## iter  60 value 23.542413
## iter  70 value 23.516243
## iter  80 value 23.483435
## iter  90 value 23.472333
## iter 100 value 23.470332
## final  value 23.470332
## stopped after 100 iterations
## # weights:  67
## initial  value 315.985097
## iter  10 value 38.536149
## iter  20 value 35.606775
## iter  30 value 31.851684
## iter  40 value 31.174335
## iter  50 value 28.020057
## iter  60 value 27.167285
## iter  70 value 26.660018
## iter  80 value 25.339098
## iter  90 value 23.868263
## iter 100 value 23.198007
## final  value 23.198007
## stopped after 100 iterations
## # weights:  78
## initial  value 320.459388
## iter  10 value 35.029957
## iter  20 value 27.567841
## iter  30 value 22.786864
## iter  40 value 18.465745
## iter  50 value 15.734893
## iter  60 value 15.484935
## iter  70 value 15.456661
## iter  80 value 15.456458
## iter  90 value 15.456260
## iter 100 value 15.456211
## final  value 15.456211
## stopped after 100 iterations
## # weights:  89
## initial  value 399.807517
## iter  10 value 31.543391
## iter  20 value 21.925367
## iter  30 value 11.961561
## iter  40 value 10.369314
## iter  50 value 8.827267
## iter  60 value 8.344943
## iter  70 value 7.987507
## iter  80 value 7.679489
## iter  90 value 7.343846
## iter 100 value 6.233574
## final  value 6.233574
## stopped after 100 iterations
## # weights:  100
## initial  value 311.715981
## iter  10 value 33.204124
## iter  20 value 16.947828
## iter  30 value 6.369743
## iter  40 value 1.847597
## iter  50 value 0.030143
## iter  60 value 0.002284
## final  value 0.000064
## converged
## [1] 0.9649122807 0.9707602339 0.9649122807 0.9473684211 0.9590643275
## [6] 0.9707602339 0.9649122807 0.9649122807 0.9415204678
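Rather than reading the vector by eye, the tied best sizes can be picked off programmatically; a small sketch (the hidden sizes start at 1, so an index equals its size):

```r
set.seed(42)
acc <- cross.val.nnet(train, test, 1, 9)
which(acc == max(acc))  # sizes tied for the best test accuracy: 2 and 6
```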
# So the best accuracy comes with 2 or 6 neurons in the hidden layer; we use 6

####################
#                  #
#    Exercise 4    #
#                  #
####################

library(neuralnet)

# neuralnet() needs a numeric response (benign = 1, malignant = 2) and does not
# accept the class ~ . shorthand, so the formula is built explicitly
train$class <- as.numeric(train$class)
test$class <- as.numeric(test$class)
n <- names(train)
f <- as.formula(paste("class ~", paste(n[!n %in% "class"], collapse = " + ")))

set.seed(42)
model.neuralnet.1 <- neuralnet(f, data = train, hidden = 6)
pred.model.1 <- compute(model.neuralnet.1, test[, 1:9])
str(pred.model.1)
## List of 2
##  $ neurons   :List of 2
##   ..$ : num [1:171, 1:10] 1 1 1 1 1 1 1 1 1 1 ...
##   .. ..- attr(*, "dimnames")=List of 2
##   .. .. ..$ : chr [1:171] "4" "6" "9" "11" ...
##   .. .. ..$ : chr [1:10] "1" "V1" "V2" "V3" ...
##   ..$ : num [1:171, 1:7] 1 1 1 1 1 1 1 1 1 1 ...
##   .. ..- attr(*, "dimnames")=List of 2
##   .. .. ..$ : chr [1:171] "4" "6" "9" "11" ...
##   .. .. ..$ : NULL
##  $ net.result: num [1:171, 1] 1.915 2.012 0.985 0.981 1.038 ...
##   ..- attr(*, "dimnames")=List of 2
##   .. ..$ : chr [1:171] "4" "6" "9" "11" ...
##   .. ..$ : NULL
# Round the raw output to the nearest class code; in the confusion matrix,
# rows are the true classes and columns the predictions
pred.model.1$net.result <- round(pred.model.1$net.result)
Table <- table(test$class, pred.model.1$net.result)
Table
##    
##       1   2
##   1 106   5
##   2   1  59
accuracy <- sum(diag(Table)) / sum(Table)
accuracy
## [1] 0.9649122807
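One caveat with round(): net.result is unconstrained, so an output below 0.5 or above 2.5 rounds to a code outside {1, 2}, which actually happens in Exercises 5 through 7 below. A small helper, as a sketch (clamp.class is a hypothetical name, not from the original), that clamps predictions to the valid codes:

```r
# Map raw network output onto the class codes 1 (benign) and 2 (malignant)
clamp.class <- function(x) pmax(1, pmin(2, round(x)))

table(test$class, clamp.class(pred.model.1$net.result))
```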
####################
#                  #
#    Exercise 5    #
#                  #
####################

# 'rprop-': resilient backpropagation without weight backtracking
# (the package default is 'rprop+')
set.seed(42)
model.neuralnet.2 <- neuralnet(f, data = train, hidden = 6, algorithm = 'rprop-')
pred.model.2 <- compute(model.neuralnet.2, test[, 1:9])
pred.model.2$net.result <- round(pred.model.2$net.result)
# Clamp rounded outputs to the valid class codes 1 and 2
pred.model.2$net.result[which(pred.model.2$net.result <= 1)] <- 1
pred.model.2$net.result[which(pred.model.2$net.result >= 2)] <- 2
Table <- table(test$class, pred.model.2$net.result)
Table
##    
##       1   2
##   1 106   5
##   2   2  58
accuracy <- sum(diag(Table)) / sum(Table)
accuracy
## [1] 0.9590643275
####################
#                  #
#    Exercise 6    #
#                  #
####################

# 'sag': globally convergent variant using the smallest absolute gradient
set.seed(42)
model.neuralnet.3 <- neuralnet(f, data = train, hidden = 6, algorithm = 'sag',
                               learningrate.limit = c(0.01, 1), stepmax = 1e+06)
## Warning: algorithm did not converge in 1 of 1 repetition(s) within the
## stepmax
pred.model.3 <- compute(model.neuralnet.3, test[, 1:9])
## Warning in is.na(weights): is.na() applied to non-(list or vector) of type
## 'NULL'
## Error in nrow[w] * ncol[w]: non-numeric argument to binary operator
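The warning and error above mean this 'sag' run hit stepmax without converging, so the fitted object carries no weights and compute() has nothing to propagate; the table below can therefore only come from a run that did converge (for instance with a different seed or a larger stepmax). A minimal guard, as a sketch, assuming only that a non-converged neuralnet fit leaves the weights slot NULL (which is what the is.na(weights) warning indicates):

```r
# Hypothetical helper: refuse to predict from a model that never converged
safe.compute <- function(model, newdata) {
  if (is.null(model$weights)) {
    stop("neuralnet did not converge; increase stepmax or try another seed")
  }
  compute(model, newdata)
}
```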
pred.model.3$net.result <- round(pred.model.3$net.result)
Table <- table(test$class, pred.model.3$net.result)
Table
##    
##       1   2   3
##   1 106   4   1
##   2   4  56   0
accuracy <- sum(diag(Table)) / sum(Table)
accuracy
## [1] 0.9473684211
# 'slr': globally convergent variant using the smallest learning rate
# (the same convergence caveat applies here)
set.seed(42)
model.neuralnet.4 <- neuralnet(f, data = train, hidden = 6, algorithm = 'slr',
                               learningrate.limit = c(0.01, 1), stepmax = 1e+06)
## Warning: algorithm did not converge in 1 of 1 repetition(s) within the
## stepmax
pred.model.4 <- compute(model.neuralnet.4, test[, 1:9])
## Warning in is.na(weights): is.na() applied to non-(list or vector) of type
## 'NULL'
## Error in nrow[w] * ncol[w]: non-numeric argument to binary operator
pred.model.4$net.result <- round(pred.model.4$net.result)
Table <- table(test$class, pred.model.4$net.result)
Table
##    
##       1   2   3
##   1 105   6   0
##   2   6  48   6
accuracy <- sum(diag(Table)) / sum(Table)
accuracy
## [1] 0.8947368421
####################
#                  #
#    Exercise 7    #
#                  #
####################

# learningrate = 0.001
set.seed(42)
model.neuralnet.5 <- neuralnet(f, data = train, hidden = 6, algorithm = 'rprop-',
                               learningrate = 0.001)
pred.model.5 <- compute(model.neuralnet.5, test[, 1:9])
pred.model.5$net.result <- round(pred.model.5$net.result)
Table <- table(test$class, pred.model.5$net.result)
Table
##    
##       0   1   2
##   1   2 104   5
##   2   0   2  58
# Clamp the rounded outputs back to the valid class codes 1 and 2
pred.model.5$net.result[which(pred.model.5$net.result <= 1)] <- 1
pred.model.5$net.result[which(pred.model.5$net.result >= 2)] <- 2
Table <- table(test$class, pred.model.5$net.result)
Table
##    
##       1   2
##   1 106   5
##   2   2  58
accuracy <- sum(diag(Table)) / sum(Table)
accuracy
## [1] 0.9590643275
# learningrate = 1
set.seed(42)
model.neuralnet.6 <- neuralnet(f, data = train, hidden = 6, algorithm = 'rprop-',
                               learningrate = 1)
pred.model.6 <- compute(model.neuralnet.6, test[, 1:9])
pred.model.6$net.result <- round(pred.model.6$net.result)
Table <- table(test$class, pred.model.6$net.result)
Table
##    
##       0   1   2
##   1   2 104   5
##   2   0   2  58
pred.model.6$net.result[which(pred.model.6$net.result == 0)] <- 1
Table <- table(test$class, pred.model.6$net.result)
Table
##    
##       1   2
##   1 106   5
##   2   2  58
accuracy <- sum(diag(Table)) / sum(Table)
accuracy
## [1] 0.9590643275
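The two learning rates give identical results, which is expected: in neuralnet, the learningrate argument is only used by traditional backpropagation, so it has no effect under 'rprop-'. A sketch of how one might actually exercise it, switching to algorithm = 'backprop' (a plain gradient-descent fit that may well need the larger stepmax to converge):

```r
# Hedged sketch: learningrate only matters for algorithm = 'backprop'
set.seed(42)
model.bp <- neuralnet(f, data = train, hidden = 6,
                      algorithm = 'backprop', learningrate = 0.001,
                      stepmax = 1e+06)
```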
####################
#                  #
#    Exercise 8    #
#                  #
####################

plot(model.neuralnet.5)
(Plot: the fitted network for model.neuralnet.5, with 9 input nodes, one hidden layer of 6 neurons, and the estimated weight on each connection.)
####################
#                  #
#    Exercise 9    #
#                  #
####################

# Three hidden layers of 9 neurons each
set.seed(42)
model.neuralnet.7 <- neuralnet(f, data = train, hidden = c(9, 9, 9))
pred.model.7 <- compute(model.neuralnet.7, test[, 1:9])
pred.model.7$net.result <- round(pred.model.7$net.result)
Table <- table(test$class, pred.model.7$net.result)
Table
##    
##       1   2
##   1 107   4
##   2   3  57
accuracy <- sum(diag(Table)) / sum(Table)
accuracy
## [1] 0.9590643275
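For a sense of scale: with 9 inputs, three hidden layers of 9 neurons, one output, and a bias feeding every layer, this network estimates 280 weights, against the 67 reported by nnet for the single 6-neuron hidden layer used earlier. A back-of-the-envelope check:

```r
# Weight count: each layer's inputs gain a bias unit, hence the "+ 1"
layers <- c(9, 9, 9, 9, 1)                      # inputs, 3 hidden layers, output
sum((head(layers, -1) + 1) * tail(layers, -1))  # 280
```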
####################
#                  #
#   Exercise 10    #
#                  #
####################

plot(model.neuralnet.7)
(Plot: the fitted network for model.neuralnet.7, with three hidden layers of 9 neurons each.)