# Creation of the sample ----
alpha <- 0.1
beta <- 0.7  # the following assumes alpha < 0.5 < beta

# Draw a sample of size n: two uniform features on [0, 1]^2 and a noisy
# binary label. Below the frontier 2*x1 + x2 = 1.5 the label is 1 with
# probability alpha, above it with probability beta; since
# alpha < 0.5 < beta, the Bayes classifier predicts 1 iff 2*x1 + x2 > 1.5.
rexemple <- function(n) {
  res <- list()
  res$x <- cbind(runif(n), runif(n))
  res$y <- numeric(n)  # was array(n): a length-1 array, not a length-n vector
  u <- runif(n)
  below <- 2 * res$x[, 1] + res$x[, 2] < 1.5  # renamed from T (shadowed TRUE)
  res$y[below] <- as.numeric(u[below] < alpha)
  # NOTE(review): the original text between "u[T]" and "1.5;}" was lost in
  # this copy of the file; everything from here down to bayesClassifier is
  # reconstructed from context (the alpha/beta comment, the legend's
  # "true frontier" entry, and the surviving "1.5;}" tail) -- verify against
  # the original script.
  res$y[!below] <- as.numeric(u[!below] < beta)
  res
}

n <- 1000
train <- rexemple(n)
test <- rexemple(n)

# Misclassification rate, on the test sample, of `classifier`: a function of
# a single point x = c(x1, x2) returning TRUE/1 for class 1.
evaluateClassifError <- function(classifier) {
  pred <- apply(test$x, 1, classifier)
  mean(pred != test$y)
}

# Training sample and the true (Bayes) frontier 2*x1 + x2 = 1.5.
plot(train$x[, 1], train$x[, 2],
     col = ifelse(train$y == 1, "red", "blue"),
     xlab = "x1", ylab = "x2")
lines(c(0, 1), c(1.5, -0.5), col = "black")

# Bayes classifier: predict 1 on the beta (high-probability) side.
bayesClassifier <- function(x) {
  2 * x[1] + x[2] > 1.5
}
bayesError <- evaluateClassifError(bayesClassifier)

# Method 0: linear regression ----
# Fit y ~ x by least squares and classify by thresholding the fitted value
# at 0.5; the frontier is cl[1] + cl[2]*x1 + cl[3]*x2 = 0.5.
linReg <- lm(train$y ~ train$x[, 1] + train$x[, 2])
cl <- coef(linReg)  # coef() instead of partial matching via $coeff
lines(c(0, 1),
      c((0.5 - cl[1]) / cl[3], (0.5 - cl[1] - cl[2]) / cl[3]),
      col = "green")
linClassifier <- function(x) {
  cl[1] + x[1] * cl[2] + x[2] * cl[3] > 0.5
}
linError <- evaluateClassifError(linClassifier)

# Method 1: logistic regression ----
# Fit P(y = 1 | x) by logistic regression; thresholding the probability at
# 1/2 is thresholding the linear score at 0.
logReg <- glm(train$y ~ train$x[, 1] + train$x[, 2], family = binomial)
cg <- coef(logReg)
lines(c(0, 1), c(-cg[1] / cg[3], (-cg[1] - cg[2]) / cg[3]), col = "magenta")
logClassifier <- function(x) {
  cg[1] + x[1] * cg[2] + x[2] * cg[3] > 0
}
logError <- evaluateClassifError(logClassifier)

# Graph legend and result prints ----
legend(0.8, 1.2,
       c("true frontier", "linear classifier", "logistic classifier"),
       col = c("black", "green", "magenta"), lty = 1)
cat("Bayes classification error:\t", bayesError,
    "\nLinear classifier error:\t", linError,
    "\nLogistic classifier error:\t", logError, "\n")