### Examples
require(graphics)
fr <- function(x) {   ## Rosenbrock Banana function
    x1 <- x[1]
    x2 <- x[2]
    100 * (x2 - x1 * x1)^2 + (1 - x1)^2
}
grr <- function(x) {  ## Gradient of 'fr'
    x1 <- x[1]
    x2 <- x[2]
    c(-400 * x1 * (x2 - x1 * x1) - 2 * (1 - x1),
      200 * (x2 - x1 * x1))
}
optim(c(-1.2,1), fr)
$par
[1] 1.000260 1.000506
$value
[1] 8.825241e-08
$counts
function gradient
195 NA
$convergence
[1] 0
$message
NULL
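## The call above used the default Nelder-Mead method, which needs no gradient.
## Its relative convergence tolerance can be tightened via 'reltol' in
## 'control'; the value below is only illustrative, not from the original
## example:
optim(c(-1.2, 1), fr, control = list(reltol = 1e-12))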
(res <- optim(c(-1.2,1), fr, grr, method = "BFGS"))
$par
[1] 1 1
$value
[1] 9.594956e-18
$counts
function gradient
110 43
$convergence
[1] 0
$message
NULL
optimHess(res$par, fr, grr)
[,1] [,2]
[1,] 802.0004 -400
[2,] -400.0000 200
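## Since the BFGS run relies on the analytic gradient 'grr', a quick comparison
## against a central-difference approximation can catch coding mistakes.  This
## is a small sketch, not part of the original example; 'numgrad' is an ad-hoc
## helper introduced here:
numgrad <- function(f, x, h = 1e-6) {
    sapply(seq_along(x), function(i) {
        e <- replace(numeric(length(x)), i, h)
        (f(x + e) - f(x - e)) / (2 * h)   # central difference in coordinate i
    })
}
grr(c(-1.2, 1))          # analytic gradient at the starting point
numgrad(fr, c(-1.2, 1))  # should agree to several decimal places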
optim(c(-1.2,1), fr, NULL, method = "BFGS", hessian = TRUE)
$par
[1] 0.9998044 0.9996084
$value
[1] 3.827383e-08
$counts
function gradient
118 38
$convergence
[1] 0
$message
NULL
$hessian
[,1] [,2]
[1,] 801.6881 -399.9218
[2,] -399.9218 200.0000
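## The Hessian shown above (from optimHess() or 'hessian = TRUE') is a
## finite-difference estimate at the reported optimum.  Inverting it gauges the
## local curvature; in maximum-likelihood problems the inverse Hessian of the
## negative log-likelihood is the usual variance estimate.  A small sketch,
## reusing 'res' from the BFGS run above:
solve(optimHess(res$par, fr, grr))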
## These do not converge in the default number of steps
optim(c(-1.2,1), fr, grr, method = "CG")
$par
[1] -0.7648373 0.5927588
$value
[1] 3.106579
$counts
function gradient
402 101
$convergence
[1] 1
$message
NULL
optim(c(-1.2,1), fr, grr, method = "CG", control = list(type = 2))
$par
[1] 0.9944093 0.9888229
$value
[1] 3.123777e-05
$counts
function gradient
385 101
$convergence
[1] 1
$message
NULL
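## Both CG runs stop with convergence = 1, i.e. they hit the iteration limit
## ('maxit' defaults to 100 for the derivative-based methods).  Raising it lets
## CG continue; the value below is illustrative, not from the original example:
optim(c(-1.2, 1), fr, grr, method = "CG",
      control = list(type = 2, maxit = 10000))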
optim(c(-1.2,1), fr, grr, method = "L-BFGS-B")
$par
[1] 0.9999997 0.9999995
$value
[1] 2.267577e-13
$counts
function gradient
47 47
$convergence
[1] 0
$message
[1] "CONVERGENCE: REL_REDUCTION_OF_F <= FACTR*EPSMCH"
flb <- function(x)
{ p <- length(x); sum(c(1, rep(4, p-1)) * (x - c(1, x[-p])^2)^2) }
## 25-dimensional box constrained
optim(rep(3, 25), flb, NULL, method = "L-BFGS-B",
      lower = rep(2, 25), upper = rep(4, 25)) # par[24] is *not* at boundary
$par
[1] 2.000000 2.000000 2.000000 2.000000 2.000000 2.000000 2.000000 2.000000
[9] 2.000000 2.000000 2.000000 2.000000 2.000000 2.000000 2.000000 2.000000
[17] 2.000000 2.000000 2.000000 2.000000 2.000000 2.000000 2.000000 2.109093
[25] 4.000000
$value
[1] 368.1059
$counts
function gradient
6 6
$convergence
[1] 0
$message
[1] "CONVERGENCE: REL_REDUCTION_OF_F <= FACTR*EPSMCH"
## "wild" function , global minimum at about -15.81515
fw <- function (x)
10*sin(0.3*x)*sin(1.3*x^2) + 0.00001*x^4 + 0.2*x+80
plot(fw, -50, 50, n = 1000, main = "optim() minimising 'wild function'")
res <- optim(50, fw, method = "SANN",
             control = list(maxit = 20000, temp = 20, parscale = 20))
res
$par
[1] -15.81558
$value
[1] 67.46926
$counts
function gradient
20000 NA
$convergence
[1] 0
$message
NULL
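## SANN is stochastic, so the value found varies with the random number stream;
## calling set.seed() first makes a run reproducible.  A small aside, not part
## of the original example ('res_repro' is a name introduced here):
set.seed(1)
res_repro <- optim(50, fw, method = "SANN",
                   control = list(maxit = 20000, temp = 20, parscale = 20))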
## Now improve locally (typically only by a small amount):
(r2 <- optim(res$par, fw, method = "BFGS"))
$par
[1] -15.81515
$value
[1] 67.46773
$counts
function gradient
24 3
$convergence
[1] 0
$message
NULL
points(r2$par, r2$value, pch = 8, col = "red", cex = 2)
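## For comparison (not in the original example): a single optimize() call over
## the whole interval is not a reliable substitute here, since for such a
## wiggly function it may settle in one of the many local minima:
optimize(fw, c(-50, 50))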
## Combinatorial optimization: Traveling salesman problem
library(stats) # normally loaded
eurodistmat <- as.matrix(eurodist)
distance <- function(sq) {  # Target function
    sq2 <- embed(sq, 2)
    sum(eurodistmat[cbind(sq2[,2], sq2[,1])])
}
genseq <- function(sq) {  # Generate new candidate sequence
    idx <- seq(2, NROW(eurodistmat)-1)
    changepoints <- sample(idx, size = 2, replace = FALSE)
    tmp <- sq[changepoints[1]]
    sq[changepoints[1]] <- sq[changepoints[2]]
    sq[changepoints[2]] <- tmp
    sq
}
sq <- c(1:nrow(eurodistmat), 1) # Initial sequence: alphabetic
distance(sq)
[1] 29625
# rotate for conventional orientation
loc <- -cmdscale(eurodist, add = TRUE)$points
x <- loc[,1]; y <- loc[,2]
s <- seq_len(nrow(eurodistmat))
tspinit <- loc[sq,]
plot(x, y, type = "n", asp = 1, xlab = "", ylab = "",
main = "initial solution of traveling salesman problem", axes = FALSE)
arrows(tspinit[s,1], tspinit[s,2], tspinit[s+1,1], tspinit[s+1,2],
angle = 10, col = "green")
text(x, y, labels(eurodist), cex = 0.8)
set.seed(123) # chosen to get a good soln relatively quickly
res <- optim(sq, distance, genseq, method = "SANN",
             control = list(maxit = 30000, temp = 2000, trace = TRUE,
                            REPORT = 500))
sann objective function values
initial value 29625.000000
iter 5000 value 13786.000000
iter 10000 value 13647.000000
iter 15000 value 13433.000000
iter 20000 value 13433.000000
iter 25000 value 13433.000000
iter 29999 value 13433.000000
final value 13433.000000
sann stopped after 29999 iterations
res # Near optimum distance around 12842
$par
[1] 1 19 16 6 10 20 7 11 3 4 5 18 13 12 9 14 2 15 8 17 21 1
$value
[1] 13433
$counts
function gradient
30000 NA
$convergence
[1] 0
$message
NULL
tspres <- loc[res$par,]
plot(x, y, type = "n", asp = 1, xlab = "", ylab = "",
main = "optim() 'solving' traveling salesman problem", axes = FALSE)
arrows(tspres[s,1], tspres[s,2], tspres[s+1,1], tspres[s+1,2],
angle = 10, col = "red")
text(x, y, labels(eurodist), cex = 0.8)
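## The tour can also be printed as city names; labels(eurodist) already
## supplies the names used to annotate the plots above:
labels(eurodist)[res$par]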
## 1-D minimization: method "Brent" or optimize() is preferred, but Nelder-Mead
## may be acceptable (and sometimes unavoidable), so its 1-D warning can be
## suppressed:
system.time(rO <- optimize(function(x) (x-pi)^2, c(0, 10)))
user system elapsed
0 0 0
system.time(ro <- optim(1, function(x) (x-pi)^2, control=list(warn.1d.NelderMead = FALSE)))
user system elapsed
0 0 0
rO$minimum - pi # 0 (perfect), on one platform
ro$par - pi # ~= 1.9e-4 on one platform
utils::str(ro)
List of 5
 $ par        : num 3.14
 $ value      : num 3.47e-08
 $ counts     : Named int [1:2] 32 NA
  ..- attr(*, "names")= chr [1:2] "function" "gradient"
 $ convergence: int 0
 $ message    : NULL
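## Method "Brent" (mentioned above) handles exactly this 1-D case: it requires
## finite 'lower' and 'upper' bounds and uses optimize() internally.  A minimal
## sketch, using the same interval as the optimize() call above:
optim(1, function(x) (x - pi)^2, method = "Brent", lower = 0, upper = 10)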