The package also supports the optimization of functions via RcppEnsmallen (find out more at ensmallen.org). All code is compiled to C++, so during the optimization there is no context switch back to R.
Here we minimize `2 * norm(x)^2` using simulated annealing.
``` r
# taken from the docs of ensmallen.org
optimize <- compile_optimization_problem(
  data = list(),
  evaluate = function(x) {
    return(2 * norm(x)^2)
  },
  optimizer = optimizer_SA()
)

# should be roughly 0
optimize(matrix(c(1, -1, 1), ncol = 1))
#>              [,1]
#> [1,] 0.0005135967
#> [2,] 0.0004418168
#> [3,] 0.0002552780
```
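As a quick sanity check (not part of the package), the same objective can be minimized in plain R with base `optim`; for a vector `x`, `2 * sum(x^2)` matches `2 * norm(x)^2`:

``` r
# plain-R cross-check using base optim's simulated annealing ("SANN");
# this runs interpreted in R, unlike the compiled version above
optim(
  par = c(1, -1, 1),
  fn = function(x) 2 * sum(x^2), # squared Euclidean norm, times 2
  method = "SANN"
)$par
```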
Gradient-free optimizers include:

- `optimizer_SA`
- `optimizer_CNE`
Here we solve a linear regression problem using L-BFGS.
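L-BFGS uses gradient information: for the objective `norm(response - design_matrix %*% beta)^2`, the gradient with respect to `beta` is `-2 * t(design_matrix) %*% (response - design_matrix %*% beta)`, which is what the `gradient` function below returns.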
``` r
optimize_lbfgs <- compile_optimization_problem(
  data = list(design_matrix = type_matrix(), response = type_colvec()),
  evaluate = function(beta) {
    return(norm(response - design_matrix %*% beta)^2)
  },
  gradient = function(beta) {
    # scalar-matrix product uses *, not %*%
    return(-2 * t(design_matrix) %*% (response - design_matrix %*% beta))
  },
  optimizer = optimizer_L_BFGS()
)
```
``` r
# this example is taken from the RcppEnsmallen package
# https://github.com/coatless/rcppensmallen/blob/master/src/example-linear-regression-lbfgs.cpp
n <- 1e6
beta <- c(-2, 1.5, 3, 8.2, 6.6)
p <- length(beta)
X <- cbind(1, matrix(rnorm(n), ncol = p - 1))
y <- X %*% beta + rnorm(n / (p - 1))

# run the optimization with L-BFGS fully in C++
optimize_lbfgs(
  design_matrix = X,
  response = y,
  beta = matrix(runif(p), ncol = 1)
)
#>           [,1]
#> [1,] -2.003886
#> [2,]  1.501473
#> [3,]  2.998593
#> [4,]  8.198516
#> [5,]  6.599749
```
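To verify the estimates (my addition, not part of the original example), the compiled result can be compared against R's own least-squares solution:

``` r
# reference solution via base R's QR-based least squares;
# should be close to both the optimizer output and the true beta
qr.solve(X, y)
```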
Gradient-based optimizers include:

- `optimizer_L_BFGS`
- `optimizer_GradientDescent`
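As a hedged sketch (assuming `optimizer_GradientDescent()` is usable with its default settings, which I have not verified), the same problem could be compiled with plain gradient descent simply by swapping the optimizer:

``` r
# hypothetical variant of the problem above with a different optimizer;
# optimizer_GradientDescent() defaults are assumed to be usable here
optimize_gd <- compile_optimization_problem(
  data = list(design_matrix = type_matrix(), response = type_colvec()),
  evaluate = function(beta) {
    return(norm(response - design_matrix %*% beta)^2)
  },
  gradient = function(beta) {
    return(-2 * t(design_matrix) %*% (response - design_matrix %*% beta))
  },
  optimizer = optimizer_GradientDescent()
)
```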