The package also supports optimization of functions via RcppEnsmallen; find out more at ensmallen.org. All code is compiled to C++, so during the optimization there is no context switch back to R.
Here we minimize 2 * norm(x)^2 using simulated annealing.
# taken from the docs of ensmallen.org
optimize <- compile_optimization_problem(
  data = list(),
  evaluate = function(x) {
    return(2 * norm(x)^2)
  },
  optimizer = optimizer_SA()
)
# the minimizer is x = 0, so the result should be roughly zero
optimize(matrix(c(1, -1, 1), ncol = 1))
#> [,1]
#> [1,] 2.311036e-04
#> [2,] -1.752398e-03
#> [3,] 8.630307e-05
See also: optimizer_SA(), optimizer_CNE().
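For comparison, roughly the same problem can be solved entirely in R with base R's optim() and its simulated annealing method "SANN". This is a plain-R sketch of what the compiled version avoids: every evaluation of the objective here happens in R.

# plain base R comparison using optim() with simulated annealing
# (illustrative only; runs entirely in R, unlike the compiled version)
res <- optim(
  par = c(1, -1, 1),
  fn = function(x) 2 * sum(x^2), # same objective, written for a numeric vector
  method = "SANN"
)
res$par # should also be roughly zero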
Here we solve a linear regression problem using L-BFGS.
optimize_lbfgs <- compile_optimization_problem(
  data = list(design_matrix = type_matrix(), response = type_colvec()),
  evaluate = function(beta) {
    return(norm(response - design_matrix %*% beta)^2)
  },
  gradient = function(beta) {
    # the gradient of norm(y - X %*% beta)^2 is -2 * t(X) %*% (y - X %*% beta)
    return(-2 * t(design_matrix) %*% (response - design_matrix %*% beta))
  },
  optimizer = optimizer_L_BFGS()
)
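Since an analytic gradient is easy to get wrong, it can be worth checking it against finite differences before compiling. The snippet below is a sketch in plain R on a small, made-up problem; the names X_small, y_small, f, and g are illustrative and not part of the package.

# finite-difference check of the gradient formula, in plain R
X_small <- matrix(rnorm(20), ncol = 2)
y_small <- matrix(rnorm(10), ncol = 1)
f <- function(b) sum((y_small - X_small %*% b)^2)
g <- function(b) as.vector(-2 * t(X_small) %*% (y_small - X_small %*% b))
b0 <- c(0.5, -0.3)
eps <- 1e-6
num_grad <- sapply(seq_along(b0), function(i) {
  e <- replace(numeric(length(b0)), i, eps)
  (f(b0 + e) - f(b0 - e)) / (2 * eps)
})
max(abs(num_grad - g(b0))) # should be close to 0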
# this example is taken from the RcppEnsmallen package
# https://github.com/coatless/rcppensmallen/blob/master/src/example-linear-regression-lbfgs.cpp
n <- 1e6
beta <- c(-2, 1.5, 3, 8.2, 6.6)
p <- length(beta)
# X has n / (p - 1) = 250,000 rows: an intercept column plus p - 1 predictors
X <- cbind(1, matrix(rnorm(n), ncol = p - 1))
y <- X %*% beta + rnorm(n / (p - 1))
# run the optimization with L-BFGS fully in C++
optimize_lbfgs(
  design_matrix = X,
  response = y,
  beta = matrix(runif(p), ncol = 1)
)
#> [,1]
#> [1,] -1.999161
#> [2,] 1.503055
#> [3,] 3.000907
#> [4,] 8.202466
#> [5,] 6.599628
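One way to sanity-check the result is against the ordinary least squares solution from base R, which should agree with the optimizer up to the noise in the simulated data.

# closed-form least squares fit via base R, for comparison
lm.fit(X, y)$coefficients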
See also: optimizer_L_BFGS(), optimizer_GradientDescent().