From 3774d369ccf80845e90e292e350896b648e2992c Mon Sep 17 00:00:00 2001 From: gaurav Date: Wed, 23 Apr 2025 15:17:03 +0530 Subject: [PATCH 01/17] add nevergrad cma --- .tools/envs/testenv-linux.yml | 1 + .tools/envs/testenv-numpy.yml | 1 + .tools/envs/testenv-others.yml | 1 + .tools/envs/testenv-pandas.yml | 1 + environment.yml | 1 + pyproject.toml | 2 + src/optimagic/algorithms.py | 33 ++++ src/optimagic/config.py | 7 + .../optimizers/nevergrad_optimizers.py | 181 ++++++++++++++++++ 9 files changed, 228 insertions(+) create mode 100644 src/optimagic/optimizers/nevergrad_optimizers.py diff --git a/.tools/envs/testenv-linux.yml b/.tools/envs/testenv-linux.yml index 67fab9017..bfd69858d 100644 --- a/.tools/envs/testenv-linux.yml +++ b/.tools/envs/testenv-linux.yml @@ -22,6 +22,7 @@ dependencies: - pybaum>=0.1.2 # run, tests - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests + - nevergrad # run, tests - seaborn # dev, tests - mypy=1.14.1 # dev, tests - pyyaml # dev, tests diff --git a/.tools/envs/testenv-numpy.yml b/.tools/envs/testenv-numpy.yml index 2cd35c4e0..90eb1ca18 100644 --- a/.tools/envs/testenv-numpy.yml +++ b/.tools/envs/testenv-numpy.yml @@ -20,6 +20,7 @@ dependencies: - pybaum>=0.1.2 # run, tests - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests + - nevergrad # run, tests - seaborn # dev, tests - mypy=1.14.1 # dev, tests - pyyaml # dev, tests diff --git a/.tools/envs/testenv-others.yml b/.tools/envs/testenv-others.yml index 974cffec1..6a19fe3a7 100644 --- a/.tools/envs/testenv-others.yml +++ b/.tools/envs/testenv-others.yml @@ -20,6 +20,7 @@ dependencies: - pybaum>=0.1.2 # run, tests - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests + - nevergrad # run, tests - seaborn # dev, tests - mypy=1.14.1 # dev, tests - pyyaml # dev, tests diff --git a/.tools/envs/testenv-pandas.yml b/.tools/envs/testenv-pandas.yml index 6d88f1016..7a3131471 100644 --- a/.tools/envs/testenv-pandas.yml +++ b/.tools/envs/testenv-pandas.yml @@ -20,6 +20,7 @@ dependencies: - pybaum>=0.1.2 # run, tests - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests + - nevergrad # run, tests - seaborn # dev, tests - mypy=1.14.1 # dev, tests - pyyaml # dev, tests diff --git a/environment.yml b/environment.yml index 34ab4604b..6df094e32 100644 --- a/environment.yml +++ b/environment.yml @@ -25,6 +25,7 @@ dependencies: - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests - myst-nb # docs + - nevergrad # run, tests - sphinx # docs - sphinx-copybutton # docs - sphinx-design # docs diff --git a/pyproject.toml b/pyproject.toml index 40b93ff8d..4c6f7bc95 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ dependencies = [ "sqlalchemy>=1.3", "annotated-types", "typing-extensions", + "nevergrad", ] dynamic = ["version"] keywords = [ @@ -349,6 +350,7 @@ module = [ "cyipopt", "nlopt", "bokeh", + "nevergrad", "bokeh.layouts", "bokeh.models", "bokeh.plotting", diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py index a892f5a51..1d3a7aa54 100644 --- a/src/optimagic/algorithms.py +++ b/src/optimagic/algorithms.py @@ -17,6 +17,7 @@ from optimagic.optimizers.ipopt import Ipopt from optimagic.optimizers.nag_optimizers import NagDFOLS, NagPyBOBYQA from optimagic.optimizers.neldermead import NelderMeadParallel +from optimagic.optimizers.nevergrad_optimizers import NevergradCMAES from optimagic.optimizers.nlopt_optimizers import ( NloptBOBYQA, NloptCCSAQ, @@ -170,6 +171,7 @@ def Scalar( @dataclass(frozen=True) class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_cmaes: 
Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -362,6 +364,7 @@ def Scalar(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithm @dataclass(frozen=True) class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -401,6 +404,7 @@ def Parallel(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -456,6 +460,7 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorith @dataclass(frozen=True) class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -603,6 +608,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorit @dataclass(frozen=True) class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -697,6 +703,7 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalParallelScalarAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1020,6 +1027,7 @@ def Local(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalGradientFreeAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -1083,6 +1091,7 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class GlobalGradientFreeScalarAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -1126,6 +1135,7 @@ def Parallel(self) -> GlobalGradientFreeParallelScalarAlgorithms: @dataclass(frozen=True) class GlobalGradientFreeParallelAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1291,6 +1301,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class BoundedGradientFreeScalarAlgorithms(AlgoSelection): nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = 
NloptCRS2LM @@ -1362,6 +1373,7 @@ def Parallel(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms: @dataclass(frozen=True) class BoundedGradientFreeParallelAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen @@ -1443,6 +1455,7 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class GradientFreeParallelScalarAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1511,6 +1524,7 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalScalarAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -1559,6 +1573,7 @@ def Parallel(self) -> BoundedGlobalParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalParallelAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1627,6 +1642,7 @@ def Scalar(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class GlobalParallelScalarAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1860,6 +1876,7 @@ def Scalar(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedParallelScalarAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2115,6 +2132,7 @@ def Local(self) -> GradientBasedLikelihoodLocalAlgorithms: @dataclass(frozen=True) class GlobalGradientFreeAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -2202,6 +2220,7 @@ def Scalar(self) -> GradientFreeLocalScalarAlgorithms: class BoundedGradientFreeAlgorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM @@ -2298,6 +2317,7 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms: class GradientFreeScalarAlgorithms(AlgoSelection): nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM @@ -2378,6 +2398,7 @@ def Parallel(self) -> GradientFreeLeastSquaresParallelAlgorithms: @dataclass(frozen=True) 
class GradientFreeParallelAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen @@ -2417,6 +2438,7 @@ def Scalar(self) -> GradientFreeParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -2498,6 +2520,7 @@ def Scalar(self) -> GlobalNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class GlobalScalarAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -2550,6 +2573,7 @@ def Parallel(self) -> GlobalParallelScalarAlgorithms: @dataclass(frozen=True) class GlobalParallelAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2811,6 +2835,7 @@ class BoundedScalarAlgorithms(AlgoSelection): fides: Type[Fides] = Fides ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA @@ -2911,6 +2936,7 @@ def Parallel(self) -> BoundedLeastSquaresParallelAlgorithms: @dataclass(frozen=True) class BoundedParallelAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen @@ -3012,6 +3038,7 @@ def Scalar(self) -> NonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class ParallelScalarAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -3118,6 +3145,7 @@ class GradientFreeAlgorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM @@ -3188,6 +3216,7 @@ def Scalar(self) -> GradientFreeScalarAlgorithms: @dataclass(frozen=True) class GlobalAlgorithms(AlgoSelection): + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -3319,6 +3348,7 @@ class BoundedAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA @@ -3454,6 +3484,7 @@ 
class ScalarAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA @@ -3582,6 +3613,7 @@ def Local(self) -> LikelihoodLocalAlgorithms: @dataclass(frozen=True) class ParallelAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen @@ -3629,6 +3661,7 @@ class Algorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel + nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA diff --git a/src/optimagic/config.py b/src/optimagic/config.py index d63ef54ac..023065eb4 100644 --- a/src/optimagic/config.py +++ b/src/optimagic/config.py @@ -54,6 +54,13 @@ else: IS_PYGMO_INSTALLED = True +try: + import nevergrad as ng # noqa: F401 +except ImportError: + IS_NEVERGRAD_INSTALLED = False +else: + IS_NEVERGRAD_INSTALLED = True + try: import cyipopt # noqa: F401 except ImportError: diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py new file mode 100644 index 000000000..c653e38e4 --- /dev/null +++ b/src/optimagic/optimizers/nevergrad_optimizers.py @@ -0,0 +1,181 @@ +"""Implement nevergrad optimizers.""" + +from concurrent.futures import Future +from dataclasses import dataclass +from typing import Any, List + +import numpy as np +from numpy.typing import NDArray + +from optimagic import mark +from optimagic.config import IS_NEVERGRAD_INSTALLED +from optimagic.exceptions import NotInstalledError +from optimagic.optimization.algo_options import ( + STOPPING_MAXFUN_GLOBAL, +) +from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult +from optimagic.optimization.internal_optimization_problem import ( + InternalOptimizationProblem, +) +from optimagic.typing import ( + AggregationLevel, + Callable, + NonNegativeFloat, + PositiveInt, +) + +if IS_NEVERGRAD_INSTALLED: + import nevergrad as ng + + +@mark.minimizer( + name="nevergrad_cmaes", + solver_type=AggregationLevel.SCALAR, + is_available=IS_NEVERGRAD_INSTALLED, + is_global=True, + needs_jac=False, + needs_hess=False, + supports_parallelism=True, + supports_bounds=True, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=False, +) +@dataclass(frozen=True) +class NevergradCMAES(Algorithm): + scale: NonNegativeFloat = 1.0 + population_size: int | None = None + elitist: bool = False + diagonal: bool = False + use_fast_implementation: bool = False + high_speed: bool = False + n_cores: PositiveInt = 1 + stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL + learning_rate_rank_one_update: NonNegativeFloat = 1.0 + learning_rate_rank_mu_update: NonNegativeFloat = 1.0 + ftol: NonNegativeFloat = 1e-11 + xtol: NonNegativeFloat = 1e-11 + random_init: bool = False + + def _solve_internal_problem( + self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] + ) -> 
InternalOptimizeResult: + cma_options = { + "tolx": self.xtol, + "tolfun": self.ftol, + "CMA_rankmu": self.learning_rate_rank_mu_update, + "CMA_rankone": self.learning_rate_rank_one_update, + } + + optimizer = ng.optimizers.ParametrizedCMA( + scale=self.scale, + popsize=self.population_size, + elitist=self.elitist, + diagonal=self.diagonal, + high_speed=self.high_speed, + fcmaes=self.use_fast_implementation, + random_init=self.random_init, + inopts=cma_options, + ) + + res = nevergrad_internal( + problem=problem, + x0=x0, + optimizer=optimizer, + stopping_maxfun=self.stopping_maxfun, + n_cores=self.n_cores, + ) + + return res + + +""" helper function for nevergrad""" + + +def nevergrad_internal( + problem: InternalOptimizationProblem, + x0: NDArray[np.float64], + n_cores: int, + optimizer: Callable[..., Any], + stopping_maxfun: int, +) -> InternalOptimizeResult: + if not IS_NEVERGRAD_INSTALLED: + raise NotInstalledError("Nevergrad is not installed.") + + param = ng.p.Array( + init=np.clip(x0, problem.bounds.lower, problem.bounds.upper) + ).set_bounds(problem.bounds.lower, upper=problem.bounds.upper) + instrum = ng.p.Instrumentation(param) + + parametrized_optimizer = optimizer( + parametrization=instrum, budget=stopping_maxfun, num_workers=n_cores + ) + + executor = NevergradExecutor( + batch_fun=problem.batch_fun, n_cores=n_cores, budget=stopping_maxfun + ) + recommendation = parametrized_optimizer.minimize( + problem.fun, + executor=executor, + ) + best_x = recommendation.value[0][0] + + result = InternalOptimizeResult( + x=best_x, + fun=problem.fun(best_x), + success=True, + n_fun_evals=parametrized_optimizer.num_ask, + n_jac_evals=0, + ) + + return result + + +""" Custom Executor class that uses batch_fun inside """ + + +class NevergradExecutor: + def __init__( + self, + batch_fun: Callable[..., list[float | NDArray[np.float64]]], + n_cores: int, + budget: int, + ): + self._batch_fun: Callable[..., list[float | NDArray[np.float64]]] = batch_fun + self._n_cores: int = n_cores + self._budget: int = budget + self._batch_inputs: List[NDArray[np.float64]] = [] + self._futures: List[Future[Any]] = [] + self._submitted_count: int = 0 + + def submit( + self, func: Callable[..., Any], *args: Any, **kwargs: Any + ) -> Future[Any]: + future: Future[Any] = Future() + x = args[0] + self._batch_inputs.append(x) + self._futures.append(future) + self._submitted_count += 1 + + if len(self._batch_inputs) == self._n_cores: + self._run_batch() + + if self._submitted_count == self._budget: + self._flush() + + return future + + def _run_batch(self) -> None: + results = self._batch_fun( + x_list=self._batch_inputs, n_cores=len(self._batch_inputs) + ) + + for future, result in zip(self._futures, results, strict=False): + future.set_result(result) + + self._batch_inputs.clear() + self._futures.clear() + + def _flush(self) -> None: + if self._batch_inputs: + self._run_batch() From 9c0eafd67b161e5244feb006c834b85ac1f5d7b8 Mon Sep 17 00:00:00 2001 From: gauravmanmode Date: Wed, 30 Apr 2025 22:40:57 +0530 Subject: [PATCH 02/17] Update environment.yml --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index 9467842fb..83e340b3a 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,6 @@ dependencies: - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests - myst-nb # docs - - nevergrad # run, tests - sphinx # docs - sphinx-copybutton # docs - sphinx-design # docs @@ -43,6 +42,7 @@ dependencies: - DFO-LS>=1.5.3 # dev, tests - Py-BOBYQA # 
dev, tests - fides==0.7.4 # dev, tests + - nevergrad # dev, tests - kaleido # dev, tests - pre-commit>=4 # dev - -e . # dev From 4f76bf26d02fd22979fc5e3a0275598d477c9c43 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 30 Apr 2025 17:11:31 +0000 Subject: [PATCH 03/17] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .tools/envs/testenv-linux.yml | 2 +- .tools/envs/testenv-numpy.yml | 2 +- .tools/envs/testenv-others.yml | 2 +- .tools/envs/testenv-pandas.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.tools/envs/testenv-linux.yml b/.tools/envs/testenv-linux.yml index 9a1947396..7ef601acd 100644 --- a/.tools/envs/testenv-linux.yml +++ b/.tools/envs/testenv-linux.yml @@ -22,7 +22,6 @@ dependencies: - pybaum>=0.1.2 # run, tests - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests - - nevergrad # run, tests - seaborn # dev, tests - mypy=1.14.1 # dev, tests - pyyaml # dev, tests @@ -33,6 +32,7 @@ dependencies: - DFO-LS>=1.5.3 # dev, tests - Py-BOBYQA # dev, tests - fides==0.7.4 # dev, tests + - nevergrad # dev, tests - kaleido # dev, tests - pandas-stubs # dev, tests - types-cffi # dev, tests diff --git a/.tools/envs/testenv-numpy.yml b/.tools/envs/testenv-numpy.yml index 24d619375..9b2a0eb9e 100644 --- a/.tools/envs/testenv-numpy.yml +++ b/.tools/envs/testenv-numpy.yml @@ -20,7 +20,6 @@ dependencies: - pybaum>=0.1.2 # run, tests - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests - - nevergrad # run, tests - seaborn # dev, tests - mypy=1.14.1 # dev, tests - pyyaml # dev, tests @@ -31,6 +30,7 @@ dependencies: - DFO-LS>=1.5.3 # dev, tests - Py-BOBYQA # dev, tests - fides==0.7.4 # dev, tests + - nevergrad # dev, tests - kaleido # dev, tests - types-cffi # dev, tests - types-openpyxl # dev, tests diff --git a/.tools/envs/testenv-others.yml b/.tools/envs/testenv-others.yml index 2592accd3..ee8a34b11 100644 --- a/.tools/envs/testenv-others.yml +++ b/.tools/envs/testenv-others.yml @@ -20,7 +20,6 @@ dependencies: - pybaum>=0.1.2 # run, tests - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests - - nevergrad # run, tests - seaborn # dev, tests - mypy=1.14.1 # dev, tests - pyyaml # dev, tests @@ -31,6 +30,7 @@ dependencies: - DFO-LS>=1.5.3 # dev, tests - Py-BOBYQA # dev, tests - fides==0.7.4 # dev, tests + - nevergrad # dev, tests - kaleido # dev, tests - pandas-stubs # dev, tests - types-cffi # dev, tests diff --git a/.tools/envs/testenv-pandas.yml b/.tools/envs/testenv-pandas.yml index 87dc5f7cd..e69baf05f 100644 --- a/.tools/envs/testenv-pandas.yml +++ b/.tools/envs/testenv-pandas.yml @@ -20,7 +20,6 @@ dependencies: - pybaum>=0.1.2 # run, tests - scipy>=1.2.1 # run, tests - sqlalchemy # run, tests - - nevergrad # run, tests - seaborn # dev, tests - mypy=1.14.1 # dev, tests - pyyaml # dev, tests @@ -31,6 +30,7 @@ dependencies: - DFO-LS>=1.5.3 # dev, tests - Py-BOBYQA # dev, tests - fides==0.7.4 # dev, tests + - nevergrad # dev, tests - kaleido # dev, tests - types-cffi # dev, tests - types-openpyxl # dev, tests From c8114b27c7ed75296f57df5774d1396146bc800b Mon Sep 17 00:00:00 2001 From: gaurav Date: Thu, 1 May 2025 11:53:29 +0530 Subject: [PATCH 04/17] fixes --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 4849d5396..1f8329c2b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,6 +17,7 @@ dependencies = [ "annotated-types", "typing-extensions", "iminuit", + "nevergrad", ] dynamic = 
["version"] keywords = [ From 389b1cc708cf3aabe7315336fce01c242832b465 Mon Sep 17 00:00:00 2001 From: gaurav Date: Sun, 4 May 2025 14:10:26 +0530 Subject: [PATCH 05/17] remove executor --- .pre-commit-config.yaml | 11 ++- .../optimizers/nevergrad_optimizers.py | 96 ++++++------------- 2 files changed, 34 insertions(+), 73 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4401ee534..69b6a4d24 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,6 +29,7 @@ repos: additional_dependencies: - hatchling - ruff + - iminuit - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: @@ -62,7 +63,7 @@ repos: exclude: docs/ - id: check-ast - repo: https://github.com/adrienverge/yamllint.git - rev: v1.35.1 + rev: v1.37.0 hooks: - id: yamllint exclude: tests/optimagic/optimizers/_pounders/fixtures @@ -79,7 +80,7 @@ repos: - --blank exclude: src/optimagic/optimization/algo_options.py - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.2 + rev: v0.11.8 hooks: # Run the linter. - id: ruff @@ -96,7 +97,7 @@ repos: - pyi - jupyter - repo: https://github.com/executablebooks/mdformat - rev: 0.7.21 + rev: 0.7.22 hooks: - id: mdformat additional_dependencies: @@ -108,7 +109,7 @@ repos: - '88' files: (README\.md) - repo: https://github.com/executablebooks/mdformat - rev: 0.7.21 + rev: 0.7.22 hooks: - id: mdformat additional_dependencies: @@ -131,7 +132,7 @@ repos: args: - --drop-empty-cells - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.14.1 + rev: v1.15.0 hooks: - id: mypy files: src|tests diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py index c653e38e4..776bcaecc 100644 --- a/src/optimagic/optimizers/nevergrad_optimizers.py +++ b/src/optimagic/optimizers/nevergrad_optimizers.py @@ -1,8 +1,7 @@ """Implement nevergrad optimizers.""" -from concurrent.futures import Future from dataclasses import dataclass -from typing import Any, List +from typing import Any import numpy as np from numpy.typing import NDArray @@ -44,6 +43,7 @@ @dataclass(frozen=True) class NevergradCMAES(Algorithm): scale: NonNegativeFloat = 1.0 + seed: int | None = None population_size: int | None = None elitist: bool = False diagonal: bool = False @@ -67,7 +67,7 @@ def _solve_internal_problem( "CMA_rankone": self.learning_rate_rank_one_update, } - optimizer = ng.optimizers.ParametrizedCMA( + raw_optimizer = ng.optimizers.ParametrizedCMA( scale=self.scale, popsize=self.population_size, elitist=self.elitist, @@ -78,12 +78,13 @@ def _solve_internal_problem( inopts=cma_options, ) - res = nevergrad_internal( + res = _nevergrad_internal( problem=problem, x0=x0, - optimizer=optimizer, + raw_optimizer=raw_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, + seed=self.seed, ) return res @@ -92,12 +93,13 @@ def _solve_internal_problem( """ helper function for nevergrad""" -def nevergrad_internal( +def _nevergrad_internal( problem: InternalOptimizationProblem, x0: NDArray[np.float64], n_cores: int, - optimizer: Callable[..., Any], + raw_optimizer: Callable[..., Any], stopping_maxfun: int, + seed: int | None, ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError("Nevergrad is not installed.") @@ -105,77 +107,35 @@ def nevergrad_internal( param = ng.p.Array( init=np.clip(x0, problem.bounds.lower, problem.bounds.upper) ).set_bounds(problem.bounds.lower, upper=problem.bounds.upper) - instrum = ng.p.Instrumentation(param) - parametrized_optimizer = 
optimizer( + instrum = ng.p.Instrumentation(param) + instrum.random_state.seed(12) + optimizer = raw_optimizer( parametrization=instrum, budget=stopping_maxfun, num_workers=n_cores ) - executor = NevergradExecutor( - batch_fun=problem.batch_fun, n_cores=n_cores, budget=stopping_maxfun - ) - recommendation = parametrized_optimizer.minimize( - problem.fun, - executor=executor, - ) + while optimizer.num_ask < stopping_maxfun: + x_list = [ + optimizer.ask() + for _ in range(min(n_cores, stopping_maxfun - optimizer.num_ask)) + ] + losses = problem.batch_fun([x.value[0][0] for x in x_list], n_cores=n_cores) + for x, loss in zip(x_list, losses, strict=True): + optimizer.tell(x, loss) + + recommendation = optimizer.provide_recommendation() best_x = recommendation.value[0][0] + loss = recommendation.loss + if loss is None: + loss = problem.fun(best_x) result = InternalOptimizeResult( x=best_x, - fun=problem.fun(best_x), + fun=loss, success=True, - n_fun_evals=parametrized_optimizer.num_ask, + n_fun_evals=optimizer.num_ask, n_jac_evals=0, + n_hess_evals=0, ) return result - - -""" Custom Executor class that uses batch_fun inside """ - - -class NevergradExecutor: - def __init__( - self, - batch_fun: Callable[..., list[float | NDArray[np.float64]]], - n_cores: int, - budget: int, - ): - self._batch_fun: Callable[..., list[float | NDArray[np.float64]]] = batch_fun - self._n_cores: int = n_cores - self._budget: int = budget - self._batch_inputs: List[NDArray[np.float64]] = [] - self._futures: List[Future[Any]] = [] - self._submitted_count: int = 0 - - def submit( - self, func: Callable[..., Any], *args: Any, **kwargs: Any - ) -> Future[Any]: - future: Future[Any] = Future() - x = args[0] - self._batch_inputs.append(x) - self._futures.append(future) - self._submitted_count += 1 - - if len(self._batch_inputs) == self._n_cores: - self._run_batch() - - if self._submitted_count == self._budget: - self._flush() - - return future - - def _run_batch(self) -> None: - results = self._batch_fun( - x_list=self._batch_inputs, n_cores=len(self._batch_inputs) - ) - - for future, result in zip(self._futures, results, strict=False): - future.set_result(result) - - self._batch_inputs.clear() - self._futures.clear() - - def _flush(self) -> None: - if self._batch_inputs: - self._run_batch() From 7c73c8de22f3738b200b55ea826cc0f02662c5b3 Mon Sep 17 00:00:00 2001 From: gaurav Date: Mon, 5 May 2025 03:07:04 +0530 Subject: [PATCH 06/17] docs --- docs/source/algorithms.md | 46 +++++++++++++++++++++++++++++++++++++++ docs/source/refs.bib | 10 +++++++++ 2 files changed, 56 insertions(+) diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index 02b44103c..f41702296 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -3984,6 +3984,52 @@ iminuit). - Values greater than 1 specify the maximum number of restart attempts. ``` +## Optimizers from Nevergrad + +optimagic supports the following optimizers from +[Nevergrad](https://facebookresearch.github.io/nevergrad/index.html). To use optimizers +from Nevergrad, you need to have +[Nevergrad](https://facebookresearch.github.io/nevergrad/getting_started.html#installing) +installed (pip install nevergrad). Note that nevergrad requires numpy \<> 2.0. + +```{eval-rst} +.. dropdown:: nevegrad_cmaes + + .. 
code-block:: + + "nevergrad_cmaes" + + `The Covariance Matrix Adaptation Evolution Strategy (CMA-ES) is a stochastic derivative-free numerical optimization algorithm for difficult (non-convex, ill-conditioned, multi-modal, rugged, noisy) optimization problems in continuous search spaces. + + The version available through nevergrad wraps an external implementation `pycma `_. + + + **Optimizer Parameters:** + + - **scale** (float): Scale of the search. + + - **elitist** (bool): Whether to switch to elitist mode, i.e., `+` mode instead of `,` mode, where the best point in the population is always retained. + + - **popsize** (Optional[int]): Population size. Should be `n * self.num_workers` for integer `n >= 1`. + - Default is `max(self.num_workers, 4 + int(3 * np.log(self.dimension)))`. + + - **popsize_factor** (float): Factor used in the formula for computing the population size. + - Default is `3.0`. + + - **diagonal** (bool): Use the diagonal version of CMA, which is advised for high-dimensional problems. + + - **high_speed** (bool): Use a metamodel for recommendation to speed up optimization. + + - **fcmaes** (bool): Use the fast CMA-ES implementation. + - Cannot be used with `diagonal=True`. + - Produces equivalent results and is preferable for high dimensions or when objective function evaluations are fast. + + - **random_init** (bool): Use a randomized initialization for the optimization. + + - **inopts** (optional dict): Dictionary to override any `inopts` parameter of the wrapped CMA optimizer. + - See [CMA-ES pycma GitHub](https://github.com/CMA-ES/pycma) for supported parameters. +``` + ## References ```{eval-rst} diff --git a/docs/source/refs.bib b/docs/source/refs.bib index 45f183b84..2f7dd7549 100644 --- a/docs/source/refs.bib +++ b/docs/source/refs.bib @@ -906,4 +906,14 @@ @article{JAMES1975343 author = {F. James and M. Roos} } +@article{hansen2023, +title={The CMA Evolution Strategy: A Tutorial}, +author={Nikolaus Hansen}, +year={2023}, +eprint={1604.00772}, +archivePrefix={arXiv}, +primaryClass={cs.LG}, +url={https://arxiv.org/abs/1604.00772}, +} + @Comment{jabref-meta: databaseType:bibtex;} From 231e9b257715b2226391b1eb8adec4706ecbf5fd Mon Sep 17 00:00:00 2001 From: gaurav Date: Mon, 5 May 2025 16:42:32 +0530 Subject: [PATCH 07/17] add docs --- docs/source/algorithms.md | 39 +++++++++++++++---- docs/source/refs.bib | 2 +- .../optimizers/nevergrad_optimizers.py | 4 +- 3 files changed, 33 insertions(+), 12 deletions(-) diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index f41702296..d02f397c7 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -3990,7 +3990,7 @@ optimagic supports the following optimizers from [Nevergrad](https://facebookresearch.github.io/nevergrad/index.html). To use optimizers from Nevergrad, you need to have [Nevergrad](https://facebookresearch.github.io/nevergrad/getting_started.html#installing) -installed (pip install nevergrad). Note that nevergrad requires numpy \<> 2.0. +installed (`pip install nevergrad`). Note that nevergrad requires numpy \<= 2.0. ```{eval-rst} .. dropdown:: nevegrad_cmaes @@ -3999,19 +3999,31 @@ installed (pip install nevergrad). Note that nevergrad requires numpy \<> 2.0. "nevergrad_cmaes" - `The Covariance Matrix Adaptation Evolution Strategy (CMA-ES) is a stochastic derivative-free numerical optimization algorithm for difficult (non-convex, ill-conditioned, multi-modal, rugged, noisy) optimization problems in continuous search spaces. 
+ The Covariance Matrix Adaptation Evolution Strategy (CMA-ES) is a stochastic derivative-free numerical optimization algorithm for difficult (non-convex, ill-conditioned, multi-modal, rugged, noisy) optimization problems in continuous search spaces. The version available through nevergrad wraps an external implementation `pycma `_. + The original method can be found in + :cite:`Hansen2023`. + + The fast implementation relies on fcmaes which can be installed with `pip install fcmaes`. **Optimizer Parameters:** - **scale** (float): Scale of the search. - **elitist** (bool): Whether to switch to elitist mode, i.e., `+` mode instead of `,` mode, where the best point in the population is always retained. + - **population_size** (Optional[int]): Population size. Should be + + .. math:: - - **popsize** (Optional[int]): Population size. Should be `n * self.num_workers` for integer `n >= 1`. - - Default is `max(self.num_workers, 4 + int(3 * np.log(self.dimension)))`. + \text{population_size} = n \times \text{num_workers} \quad \text{for integer } n \geq 1 + + Default is + + .. math:: + + \max\left( \text{num_workers},\ 4 + \left\lfloor 3 \cdot \log(\text{dimension}) \right\rfloor \right) - **popsize_factor** (float): Factor used in the formula for computing the population size. - Default is `3.0`. @@ -4020,14 +4032,25 @@ installed (pip install nevergrad). Note that nevergrad requires numpy \<> 2.0. - **high_speed** (bool): Use a metamodel for recommendation to speed up optimization. - - **fcmaes** (bool): Use the fast CMA-ES implementation. + - **use_fast_implementation** (bool): Use the fast CMA-ES implementation. - Cannot be used with `diagonal=True`. - Produces equivalent results and is preferable for high dimensions or when objective function evaluations are fast. - - **random_init** (bool): Use a randomized initialization for the optimization. + - **stopping.maxfun** (int): The maximum number of criterion + evaluations. + + - **ftol** (float): stopping criteria on the x tolerance. + + - **xtol** (float): stopping criteria on the f tolerance. + + - **seed** (int): seed used by the internal random number generator. + + - **learning_rate_rank_one_update** (float): Multiplier for the rank-one update learning rate of the covariance matrix. + - Default is `1.0`. + + - **learning_rate_rank_mu_update** (float): Multiplier for the rank-mu update learning rate of the covariance matrix. + - Default is `1.0`. - - **inopts** (optional dict): Dictionary to override any `inopts` parameter of the wrapped CMA optimizer. - - See [CMA-ES pycma GitHub](https://github.com/CMA-ES/pycma) for supported parameters. ``` ## References diff --git a/docs/source/refs.bib b/docs/source/refs.bib index 2f7dd7549..8b9a7a23a 100644 --- a/docs/source/refs.bib +++ b/docs/source/refs.bib @@ -906,7 +906,7 @@ @article{JAMES1975343 author = {F. James and M. 
Roos} } -@article{hansen2023, +@misc{Hansen2023, title={The CMA Evolution Strategy: A Tutorial}, author={Nikolaus Hansen}, year={2023}, diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py index 776bcaecc..6b2af2ab0 100644 --- a/src/optimagic/optimizers/nevergrad_optimizers.py +++ b/src/optimagic/optimizers/nevergrad_optimizers.py @@ -55,7 +55,6 @@ class NevergradCMAES(Algorithm): learning_rate_rank_mu_update: NonNegativeFloat = 1.0 ftol: NonNegativeFloat = 1e-11 xtol: NonNegativeFloat = 1e-11 - random_init: bool = False def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] @@ -74,7 +73,6 @@ def _solve_internal_problem( diagonal=self.diagonal, high_speed=self.high_speed, fcmaes=self.use_fast_implementation, - random_init=self.random_init, inopts=cma_options, ) @@ -109,7 +107,7 @@ def _nevergrad_internal( ).set_bounds(problem.bounds.lower, upper=problem.bounds.upper) instrum = ng.p.Instrumentation(param) - instrum.random_state.seed(12) + instrum.random_state.seed(seed) optimizer = raw_optimizer( parametrization=instrum, budget=stopping_maxfun, num_workers=n_cores ) From f40ac8fb6372f97aa30e1310c25945835b6c5fc2 Mon Sep 17 00:00:00 2001 From: gaurav Date: Thu, 8 May 2025 11:27:04 +0530 Subject: [PATCH 08/17] improve error msg --- src/optimagic/optimizers/nevergrad_optimizers.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py index 6b2af2ab0..ad4e48bc1 100644 --- a/src/optimagic/optimizers/nevergrad_optimizers.py +++ b/src/optimagic/optimizers/nevergrad_optimizers.py @@ -100,14 +100,22 @@ def _nevergrad_internal( seed: int | None, ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: - raise NotInstalledError("Nevergrad is not installed.") + raise NotInstalledError( + "The nevergrad_pso optimizer requires the 'nevergrad' package to be " + "installed. You can install it with `pip install nevergrad`. " + "Visit https://facebookresearch.github.io/nevergrad/getting_started.html" + " for more detailed installation instructions." + ) param = ng.p.Array( init=np.clip(x0, problem.bounds.lower, problem.bounds.upper) ).set_bounds(problem.bounds.lower, upper=problem.bounds.upper) instrum = ng.p.Instrumentation(param) - instrum.random_state.seed(seed) + + if seed is not None: + instrum.random_state.seed(seed) + optimizer = raw_optimizer( parametrization=instrum, budget=stopping_maxfun, num_workers=n_cores ) From 94e3582d0066285f3de6f70943f32d4e3345ba2c Mon Sep 17 00:00:00 2001 From: gaurav Date: Thu, 8 May 2025 11:38:44 +0530 Subject: [PATCH 09/17] improve error msg --- src/optimagic/optimizers/nevergrad_optimizers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py index ad4e48bc1..687603f7f 100644 --- a/src/optimagic/optimizers/nevergrad_optimizers.py +++ b/src/optimagic/optimizers/nevergrad_optimizers.py @@ -101,7 +101,7 @@ def _nevergrad_internal( ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError( - "The nevergrad_pso optimizer requires the 'nevergrad' package to be " + "The nevergrad_cmaes optimizer requires the 'nevergrad' package to be " "installed. You can install it with `pip install nevergrad`. 
" "Visit https://facebookresearch.github.io/nevergrad/getting_started.html" " for more detailed installation instructions." From 1669438e73241bdfcb8947b85246aca42e24e63b Mon Sep 17 00:00:00 2001 From: gaurav Date: Wed, 14 May 2025 00:09:42 +0530 Subject: [PATCH 10/17] refactor --- src/optimagic/algorithms.py | 34 ++++++- .../optimizers/nevergrad_optimizers.py | 95 +++++++++++++++---- 2 files changed, 111 insertions(+), 18 deletions(-) diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py index 8a8f90b26..1fb759001 100644 --- a/src/optimagic/algorithms.py +++ b/src/optimagic/algorithms.py @@ -18,7 +18,7 @@ from optimagic.optimizers.ipopt import Ipopt from optimagic.optimizers.nag_optimizers import NagDFOLS, NagPyBOBYQA from optimagic.optimizers.neldermead import NelderMeadParallel -from optimagic.optimizers.nevergrad_optimizers import NevergradCMAES +from optimagic.optimizers.nevergrad_optimizers import NevergradCMAES, NevergradPSO from optimagic.optimizers.nlopt_optimizers import ( NloptBOBYQA, NloptCCSAQ, @@ -173,6 +173,7 @@ def Scalar( @dataclass(frozen=True) class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -367,6 +368,7 @@ def Scalar(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithm @dataclass(frozen=True) class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -407,6 +409,7 @@ def Parallel(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -463,6 +466,7 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorith @dataclass(frozen=True) class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -611,6 +615,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorit @dataclass(frozen=True) class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -706,6 +711,7 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalParallelScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1033,6 +1039,7 @@ def 
Local(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalGradientFreeAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -1097,6 +1104,7 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class GlobalGradientFreeScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -1141,6 +1149,7 @@ def Parallel(self) -> GlobalGradientFreeParallelScalarAlgorithms: @dataclass(frozen=True) class GlobalGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1307,6 +1316,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms: class BoundedGradientFreeScalarAlgorithms(AlgoSelection): nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM @@ -1379,6 +1389,7 @@ def Parallel(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms: @dataclass(frozen=True) class BoundedGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen @@ -1461,6 +1472,7 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms: class GradientFreeParallelScalarAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1530,6 +1542,7 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -1579,6 +1592,7 @@ def Parallel(self) -> BoundedGlobalParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalParallelAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1648,6 +1662,7 @@ def Scalar(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class GlobalParallelScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = 
NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1883,6 +1898,7 @@ def Scalar(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedParallelScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2142,6 +2158,7 @@ def Local(self) -> GradientBasedLikelihoodLocalAlgorithms: @dataclass(frozen=True) class GlobalGradientFreeAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -2230,6 +2247,7 @@ class BoundedGradientFreeAlgorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM @@ -2327,6 +2345,7 @@ class GradientFreeScalarAlgorithms(AlgoSelection): nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM @@ -2408,6 +2427,7 @@ def Parallel(self) -> GradientFreeLeastSquaresParallelAlgorithms: class GradientFreeParallelAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen @@ -2448,6 +2468,7 @@ def Scalar(self) -> GradientFreeParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -2530,6 +2551,7 @@ def Scalar(self) -> GlobalNonlinearConstrainedScalarAlgorithms: @dataclass(frozen=True) class GlobalScalarAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -2583,6 +2605,7 @@ def Parallel(self) -> GlobalParallelScalarAlgorithms: @dataclass(frozen=True) class GlobalParallelAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2848,6 +2871,7 @@ class BoundedScalarAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: 
Type[NagPyBOBYQA] = NagPyBOBYQA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA @@ -2949,6 +2973,7 @@ def Parallel(self) -> BoundedLeastSquaresParallelAlgorithms: @dataclass(frozen=True) class BoundedParallelAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen @@ -3051,6 +3076,7 @@ def Scalar(self) -> NonlinearConstrainedParallelScalarAlgorithms: class ParallelScalarAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -3159,6 +3185,7 @@ class GradientFreeAlgorithms(AlgoSelection): nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM @@ -3230,6 +3257,7 @@ def Scalar(self) -> GradientFreeScalarAlgorithms: @dataclass(frozen=True) class GlobalAlgorithms(AlgoSelection): nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH @@ -3364,6 +3392,7 @@ class BoundedAlgorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA @@ -3501,6 +3530,7 @@ class ScalarAlgorithms(AlgoSelection): nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA @@ -3630,6 +3660,7 @@ def Local(self) -> LikelihoodLocalAlgorithms: class ParallelAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen @@ -3679,6 +3710,7 @@ class Algorithms(AlgoSelection): nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES + nevergrad_pso: Type[NevergradPSO] = NevergradPSO nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA diff --git 
a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py index 687603f7f..cfae2cd10 100644 --- a/src/optimagic/optimizers/nevergrad_optimizers.py +++ b/src/optimagic/optimizers/nevergrad_optimizers.py @@ -1,7 +1,8 @@ """Implement nevergrad optimizers.""" +import math from dataclasses import dataclass -from typing import Any +from typing import Any, Literal import numpy as np from numpy.typing import NDArray @@ -9,9 +10,7 @@ from optimagic import mark from optimagic.config import IS_NEVERGRAD_INSTALLED from optimagic.exceptions import NotInstalledError -from optimagic.optimization.algo_options import ( - STOPPING_MAXFUN_GLOBAL, -) +from optimagic.optimization.algo_options import STOPPING_MAXFUN_GLOBAL, STOPPING_MAXITER from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, @@ -26,6 +25,69 @@ if IS_NEVERGRAD_INSTALLED: import nevergrad as ng +NEVERGRAD_NOT_INSTALLED_ERROR = ( + "This optimizer requires the 'nevergrad' package to be installed. " + "You can install it with `pip install nevergrad`. " + "Visit https://facebookresearch.github.io/nevergrad/getting_started.html " + "for more detailed installation instructions." +) + + +@mark.minimizer( + name="nevergrad_pso", + solver_type=AggregationLevel.SCALAR, + is_available=IS_NEVERGRAD_INSTALLED, + is_global=True, + needs_jac=False, + needs_hess=False, + supports_parallelism=True, + supports_bounds=True, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=False, +) +@dataclass(frozen=True) +class NevergradPSO(Algorithm): + transform: Literal["arctan", "gaussian", "identity"] = "arctan" + population_size: int | None = None + n_cores: int = 1 + seed: int | None = None + stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL + inertia: float = 0.5 / math.log(2.0) + cognitive: float = 0.5 + math.log(2.0) + social: float = 0.5 + math.log(2.0) + quasi_opp_init: bool = False + speed_quasi_opp_init: bool = False + special_speed_quasi_opp_init: bool = False + + def _solve_internal_problem( + self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] + ) -> InternalOptimizeResult: + if not IS_NEVERGRAD_INSTALLED: + raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) + + raw_optimizer = ng.optimizers.ConfPSO( + transform=self.transform, + popsize=self.population_size, + omega=self.inertia, + phip=self.cognitive, + phig=self.social, + qo=self.quasi_opp_init, + sqo=self.speed_quasi_opp_init, + so=self.special_speed_quasi_opp_init, + ) + + res = _nevergrad_internal( + problem=problem, + x0=x0, + raw_optimizer=raw_optimizer, + stopping_maxfun=self.stopping_maxfun, + n_cores=self.n_cores, + seed=self.seed, + ) + + return res + @mark.minimizer( name="nevergrad_cmaes", @@ -51,17 +113,24 @@ class NevergradCMAES(Algorithm): high_speed: bool = False n_cores: PositiveInt = 1 stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL + stopping_maxiter: PositiveInt = STOPPING_MAXITER learning_rate_rank_one_update: NonNegativeFloat = 1.0 learning_rate_rank_mu_update: NonNegativeFloat = 1.0 - ftol: NonNegativeFloat = 1e-11 - xtol: NonNegativeFloat = 1e-11 + convergence_ftol_abs: NonNegativeFloat = 1e-11 + convergence_xtol_abs: NonNegativeFloat = 1e-11 + convergence_ftol_rel: NonNegativeFloat = 0 def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: + if not IS_NEVERGRAD_INSTALLED: + 
raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) + cma_options = { - "tolx": self.xtol, - "tolfun": self.ftol, + "tolx": self.convergence_xtol_abs, + "tolfun": self.convergence_ftol_abs, + "tolfunrel": self.convergence_ftol_rel, + "maxiter": self.stopping_maxiter, "CMA_rankmu": self.learning_rate_rank_mu_update, "CMA_rankone": self.learning_rate_rank_one_update, } @@ -99,17 +168,9 @@ def _nevergrad_internal( stopping_maxfun: int, seed: int | None, ) -> InternalOptimizeResult: - if not IS_NEVERGRAD_INSTALLED: - raise NotInstalledError( - "The nevergrad_cmaes optimizer requires the 'nevergrad' package to be " - "installed. You can install it with `pip install nevergrad`. " - "Visit https://facebookresearch.github.io/nevergrad/getting_started.html" - " for more detailed installation instructions." - ) - param = ng.p.Array( init=np.clip(x0, problem.bounds.lower, problem.bounds.upper) - ).set_bounds(problem.bounds.lower, upper=problem.bounds.upper) + ).set_bounds(lower=problem.bounds.lower, upper=problem.bounds.upper) instrum = ng.p.Instrumentation(param) From e71d90fe50cf53de61f6fa54aefd60d52cfbfdf9 Mon Sep 17 00:00:00 2001 From: gaurav Date: Wed, 14 May 2025 17:16:04 +0530 Subject: [PATCH 11/17] fix algorithms --- docs/source/algorithms.md | 70 +++++++++++++++++++++++++++++++++------ 1 file changed, 60 insertions(+), 10 deletions(-) diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index d02f397c7..0d2b12b79 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -3984,13 +3984,63 @@ iminuit). - Values greater than 1 specify the maximum number of restart attempts. ``` -## Optimizers from Nevergrad +## Nevergrad Optimizers -optimagic supports the following optimizers from -[Nevergrad](https://facebookresearch.github.io/nevergrad/index.html). To use optimizers -from Nevergrad, you need to have -[Nevergrad](https://facebookresearch.github.io/nevergrad/getting_started.html#installing) -installed (`pip install nevergrad`). Note that nevergrad requires numpy \<= 2.0. +optimagic supports the following algorithms from the +[Nevergrad](https://facebookresearch.github.io/nevergrad/index.html) library. To use +these optimizers, you need to have +[the nevergrad package](https://github.com/facebookresearch/nevergrad) installed +(`pip install nevergrad`). + +```{eval-rst} .. dropdown:: nevergrad_pso .. code-block:: "nevergrad_pso" Minimize a scalar function using the Particle Swarm Optimization (PSO) algorithm. The Particle Swarm Optimization algorithm was originally proposed by :cite:`Kennedy1995`. The implementation in Nevergrad is based on :cite:`Zambrano2013`. Particle Swarm Optimization (PSO) solves a problem by having a population of candidate solutions, here dubbed particles, and moving these particles around in the search-space according to simple mathematical formulae over the particle's position and velocity. Each particle's movement is influenced by its local best known position (termed the "cognitive" component), but is also guided toward the best known positions (termed the "social" component) in the search-space, which are updated as better positions are found by other particles. This is expected to move the swarm toward the best solutions. - **transform** (str): The transform to use to map from PSO optimization space to R-space. Available options are: - "arctan" (default) - "identity" - "gaussian" - **population_size** (int): Population size of the particle swarm. - **n_cores** (int): Number of cores to use.
+ - **seed** (int): Seed used by the internal random number generator. + - **stopping.maxfun** (int): Maximum number of function evaluations. + - **inertia** (float): Inertia weight. Denoted by :math:`\omega`. + Default is 0.7213475204444817. To prevent divergence, the value must be smaller + than 1. It controls the influence of the particle's previous velocity on its + movement. + - **cognitive** (float): Cognitive coefficient. Denoted by :math:`\phi_p`. + Default is 1.1931471805599454. Typical values range from 1.0 to 3.0. It controls + the influence of its own best known position on the particle's movement. + - **social** (float): Social coefficient. Denoted by :math:`\phi_g`. + Default is 1.1931471805599454. Typical values range from 1.0 to 3.0. It controls + the influence of the swarm's best known position on the particle's movement. + - **quasi_opp_init** (bool): Whether to use quasi-opposition initialization. + Default is False. + - **speed_quasi_opp_init** (bool): Whether to use quasi-opposition initialization + for speed. Default is False. + - **special_speed_quasi_opp_init** (bool): Whether to use special quasi-opposition + initialization for speed. Default is False. + +``` ```{eval-rst} .. dropdown:: nevergrad_cmaes @@ -4038,10 +4088,10 @@ installed (`pip install nevergrad`). Note that nevergrad requires numpy \<= 2.0. - **stopping.maxfun** (int): The maximum number of criterion evaluations. - - - **ftol** (float): stopping criteria on the x tolerance. - - - **xtol** (float): stopping criteria on the f tolerance. + - **stopping.maxiter** (int): The maximum number of iterations. + - **convergence_ftol_abs** (float): stopping criteria on the f tolerance. + - **convergence_ftol_rel** (float): relative stopping criteria on the f tolerance. + - **convergence_xtol_abs** (float): stopping criteria on the x tolerance. - **seed** (int): seed used by the internal random number generator. From 2abc1d1c6a3b94ced2dfb32b6d2a086bbc6b7833 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 14 May 2025 11:53:02 +0000 Subject: [PATCH 12/17] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- docs/source/algorithms.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index 134a72c5d..0d2b12b79 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -3984,7 +3984,6 @@ iminuit). - Values greater than 1 specify the maximum number of restart attempts. ``` - ## Nevergrad Optimizers From e3ac50e3866fc5faa1a24f31c54d107cf7735920 Mon Sep 17 00:00:00 2001 From: gauravmanmode Date: Wed, 14 May 2025 17:32:01 +0530 Subject: [PATCH 13/17] Update refs.bib --- docs/source/refs.bib | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/refs.bib b/docs/source/refs.bib index cbccad275..0115cff27 100644 --- a/docs/source/refs.bib +++ b/docs/source/refs.bib @@ -915,6 +915,7 @@ @misc{Hansen2023 archivePrefix={arXiv}, primaryClass={cs.LG}, url={https://arxiv.org/abs/1604.00772}, +} @InProceedings{Kennedy1995, author={Kennedy, J.
and Eberhart, R.}, From f16ceef958afd28b95966ee8f6372d175571430d Mon Sep 17 00:00:00 2001 From: gaurav Date: Tue, 3 Jun 2025 18:53:44 +0530 Subject: [PATCH 14/17] add docstrings, add more options in cmaes, improve types --- .../optimizers/nevergrad_optimizers.py | 209 +++++++++++++++--- 1 file changed, 181 insertions(+), 28 deletions(-) diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py index cfae2cd10..1c8fd20d3 100644 --- a/src/optimagic/optimizers/nevergrad_optimizers.py +++ b/src/optimagic/optimizers/nevergrad_optimizers.py @@ -2,7 +2,7 @@ import math from dataclasses import dataclass -from typing import Any, Literal +from typing import Literal import numpy as np from numpy.typing import NDArray @@ -17,7 +17,6 @@ ) from optimagic.typing import ( AggregationLevel, - Callable, NonNegativeFloat, PositiveInt, ) @@ -48,6 +47,39 @@ ) @dataclass(frozen=True) class NevergradPSO(Algorithm): + """Particle Swarm Optimization (PSO) optimizer from Nevergrad. + + Args: + transform: The transform to use to map from PSO optimization space to R-space. + Available options are: + - "arctan": Uses arctan transformation for bounded optimization + - "identity": No transformation (for unbounded optimization) + - "gaussian": Uses Gaussian transformation for bounded optimization + population_size: Population size of the particle swarm. If None, it will be + set to 4 * n_parameters. Larger values may improve optimization quality + but increase computational cost. + n_cores: Number of cores to use for parallel function evaluation. + seed: Seed used by the internal random number generator for reproducibility. + stopping_maxfun: Maximum number of function evaluations before termination. + inertia: Inertia weight (φ_p is cognitive; this one is ω). Controls the influence of the particle's previous + velocity on its movement. Must be smaller than 1 to prevent divergence. + cognitive: Cognitive coefficient (φ_p). Controls the influence of the particle's + own best known position. + social: Social coefficient (φ_g). Controls the influence of the swarm's best + known position. + quasi_opp_init: If True, uses quasi-opposition initialization for particle + positions. This can improve optimization by providing better initial + coverage of the search space. + speed_quasi_opp_init: If True, uses quasi-opposition initialization for particle + velocities. This can help in exploring the search space more effectively. + special_speed_quasi_opp_init: If True, uses a special quasi-opposition + initialization for velocities. This is an experimental feature that may + improve performance on certain problems. + + """ + transform: Literal["arctan", "gaussian", "identity"] = "arctan" population_size: int | None = None n_cores: int = 1 seed: int | None = None stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL inertia: float = 0.5 / math.log(2.0) cognitive: float = 0.5 + math.log(2.0) social: float = 0.5 + math.log(2.0) quasi_opp_init: bool = False speed_quasi_opp_init: bool = False special_speed_quasi_opp_init: bool = False @@ -66,7 +98,7 @@ def _solve_internal_problem( if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) - raw_optimizer = ng.optimizers.ConfPSO( + configured_optimizer = ng.optimizers.ConfPSO( transform=self.transform, popsize=self.population_size, omega=self.inertia, @@ -80,7 +112,7 @@ def _solve_internal_problem( res = _nevergrad_internal( problem=problem, x0=x0, - raw_optimizer=raw_optimizer, + configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=self.seed, ) @@ -104,21 +136,103 @@ def _solve_internal_problem( ) @dataclass(frozen=True) class NevergradCMAES(Algorithm): + """Covariance Matrix Adaptation Evolution Strategy (CMA-ES) optimizer from + Nevergrad.
+ + Args: + scale: Scale of the search. + elitist: Whether to switch to elitist mode (also known as (μ+λ)-CMA-ES). + In elitist mode, the best point in the population is always retained. + population_size: Population size. + diagonal: Use the diagonal version of CMA, which is more efficient for + high-dimensional problems. + high_speed: Use a metamodel for recommendation to speed up optimization. + fast_cmaes: Use the fast CMA-ES implementation. Cannot be used with + diagonal=True. Produces equivalent results and is preferable for high + dimensions or when objective function evaluations are fast. + random_init: If True, initialize the optimizer with random parameters. + n_cores: Number of cores to use for parallel function evaluation. + step_size_adaptive: Whether to adapt the step size. Can be a boolean or a string + specifying the adaptation strategy. + step_size_damping_factor: Damping factor for step size adaptation. + step_size_damping_rate: Damping rate for step size adaptation. + step_size_update_squared: Whether to use squared step sizes in updates. + learning_rate_cov_mat_update: Learning rate for the covariance matrix update. + learning_rate_rank_one_update: Multiplier for the rank-one update learning rate + of the covariance matrix. + learning_rate_rank_mu_update: Multiplier for the rank-mu update learning rate + of the covariance matrix. + learning_rate_mean_update: Learning rate for the mean update. + learning_rate_diagonal_update: Learning rate for the diagonal update. + num_parents: Number of parents (μ) for recombination. + negative_update: Whether to use negative updates for the covariance matrix. + mirror_sampling_strategy: Strategy for mirror sampling. Possible values are: + 0: Unconditional mirroring + 1: Selective mirroring + 2: Selective mirroring with delay (default) + normalize_cov_trace: How to normalize the trace of the covariance matrix. + Valid values are: + - False: No normalization + - True: Normalize to 1 + - "arithm": Arithmetic mean normalization + - "geom": Geometric mean normalization + - "aeig": Arithmetic mean of eigenvalues + - "geig": Geometric mean of eigenvalues + diag_covariance_iters: Number of iterations to use a diagonal covariance matrix + before switching to the full matrix. If False, always use the full matrix. + stopping_maxfun: Maximum number of function evaluations before termination. + stopping_maxiter: Maximum number of iterations before termination. + stopping_timeout: Maximum time in seconds before termination. + stopping_cov_mat_cond: Maximum condition number of the covariance matrix before + termination. + convergence_ftol_abs: Absolute tolerance on function value changes for + convergence. + convergence_ftol_rel: Relative tolerance on function value changes for + convergence. + convergence_xtol_abs: Absolute tolerance on parameter changes for convergence. + convergence_iter_noimprove: Number of iterations without improvement before + termination. + invariant_path: Whether the evolution path (pc) should be invariant to + transformations. + eval_final_mean: Whether to evaluate the final mean solution. + seed: Seed used by the internal random number generator for reproducibility.
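+
+    Example:
+        A minimal usage sketch (illustrative only, not part of the patch; it
+        assumes optimagic's public ``minimize`` and ``Bounds`` API and supplies
+        the finite bounds that nevergrad requires)::
+
+            import numpy as np
+            import optimagic as om
+
+            res = om.minimize(
+                fun=lambda x: float(x @ x),
+                params=np.array([1.0, 2.0]),
+                algorithm="nevergrad_cmaes",
+                bounds=om.Bounds(
+                    lower=np.full(2, -5.0),
+                    upper=np.full(2, 5.0),
+                ),
+            )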
+ + """ + scale: NonNegativeFloat = 1.0 - seed: int | None = None - population_size: int | None = None elitist: bool = False + population_size: int | None = None diagonal: bool = False - use_fast_implementation: bool = False high_speed: bool = False + fast_cmaes: bool = False + random_init: bool = False n_cores: PositiveInt = 1 + step_size_adaptive: bool | str = True + step_size_damping_factor: float = 1.0 + step_size_damping_rate: float = 0.1 + step_size_update_squared: bool = False + learning_rate_cov_mat_update: float = 1.0 + learning_rate_rank_one_update: float = 1.0 + learning_rate_rank_mu_update: float = 1.0 + learning_rate_mean_update: float = 1.0 + learning_rate_diagonal_update: float = 0.0 + num_parents: int | None = None + negative_update: bool = True + mirror_sampling_strategy: Literal[0, 1, 2] = 2 + normalize_cov_trace: bool | Literal["arithm", "geom", "aeig", "geig"] = False + diag_covariance_iters: int | bool = False stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL stopping_maxiter: PositiveInt = STOPPING_MAXITER - learning_rate_rank_one_update: NonNegativeFloat = 1.0 - learning_rate_rank_mu_update: NonNegativeFloat = 1.0 - convergence_ftol_abs: NonNegativeFloat = 1e-11 - convergence_xtol_abs: NonNegativeFloat = 1e-11 - convergence_ftol_rel: NonNegativeFloat = 0 + stopping_timeout: float = float("inf") + stopping_cov_mat_cond: float = 1e14 + convergence_ftol_abs: float = 1e-11 + convergence_ftol_rel: float = 0.0 + convergence_xtol_abs: float = 1e-11 + convergence_iter_noimprove: int | None = None + invariant_path: bool = False + eval_final_mean: bool = True + seed: int | None = None def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] @@ -127,60 +241,96 @@ def _solve_internal_problem( raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) cma_options = { - "tolx": self.convergence_xtol_abs, + "AdaptSigma": self.step_size_adaptive, + "CSA_dampfac": self.step_size_damping_factor, + "CMA_dampsvec_fade": self.step_size_damping_rate, + "CSA_squared": self.step_size_update_squared, + "CSA_invariant_path": self.invariant_path, + "CMA_on": self.learning_rate_cov_mat_update, + "CMA_rankone": self.learning_rate_rank_one_update, + "CMA_rankmu": self.learning_rate_rank_mu_update, + "CMA_cmean": self.learning_rate_mean_update, + "CMA_diagonal_decoding": self.learning_rate_diagonal_update, + "CMA_mu": self.num_parents, + "CMA_active": self.negative_update, + "CMA_mirrormethod": self.mirror_sampling_strategy, + "CMA_const_trace": self.normalize_cov_trace, + "CMA_diagonal": self.diag_covariance_iters, + "maxfevals": self.stopping_maxfun, + "maxiter": self.stopping_maxiter, + "timeout": self.stopping_timeout, + "tolconditioncov": self.stopping_cov_mat_cond, "tolfun": self.convergence_ftol_abs, "tolfunrel": self.convergence_ftol_rel, - "maxiter": self.stopping_maxiter, - "CMA_rankmu": self.learning_rate_rank_mu_update, - "CMA_rankone": self.learning_rate_rank_one_update, + "tolx": self.convergence_xtol_abs, + "tolstagnation": self.convergence_iter_noimprove, + "eval_final_mean": self.eval_final_mean, } - raw_optimizer = ng.optimizers.ParametrizedCMA( + configured_optimizer = ng.optimizers.ParametrizedCMA( scale=self.scale, popsize=self.population_size, elitist=self.elitist, diagonal=self.diagonal, high_speed=self.high_speed, - fcmaes=self.use_fast_implementation, + fcmaes=self.fast_cmaes, inopts=cma_options, ) res = _nevergrad_internal( problem=problem, x0=x0, - raw_optimizer=raw_optimizer, + configured_optimizer=configured_optimizer, 
stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=self.seed, ) - return res -""" helper function for nevergrad""" - - def _nevergrad_internal( problem: InternalOptimizationProblem, x0: NDArray[np.float64], n_cores: int, - raw_optimizer: Callable[..., Any], + configured_optimizer: ng.optimization.base.ConfiguredOptimizer, stopping_maxfun: int, seed: int | None, ) -> InternalOptimizeResult: - param = ng.p.Array( - init=np.clip(x0, problem.bounds.lower, problem.bounds.upper) - ).set_bounds(lower=problem.bounds.lower, upper=problem.bounds.upper) + """Internal helper function for nevergrad. + + Handles the optimization loop for Nevergrad optimizers, including parameter bounds, + parallel function evaluation, and result processing. + + Args: + problem (InternalOptimizationProblem): Internal optimization problem to solve. + x0 (np.ndarray): Initial parameter vector of shape (n_params,). + n_cores (int): Number of processes used to parallelize the function + evaluations. + configured_optimizer (ConfiguredOptimizer): Nevergrad optimizer instance + configured with options. + stopping_maxfun (int): Maximum number of function evaluations. + seed (int | None): Random seed for reproducibility. Defaults to None. + + Returns: + InternalOptimizeResult: Internal optimization result. + + """ + + param = ng.p.Array(init=x0).set_bounds( + lower=problem.bounds.lower, + upper=problem.bounds.upper, + ) instrum = ng.p.Instrumentation(param) if seed is not None: instrum.random_state.seed(seed) - optimizer = raw_optimizer( + optimizer = configured_optimizer( parametrization=instrum, budget=stopping_maxfun, num_workers=n_cores ) + # optimization loop using the ask-and-tell interface while optimizer.num_ask < stopping_maxfun: x_list = [ optimizer.ask() @@ -193,6 +343,9 @@ def _nevergrad_internal( recommendation = optimizer.provide_recommendation() best_x = recommendation.value[0][0] loss = recommendation.loss + + # in some cases, loss is not provided by the optimizer, so we need to + # evaluate it manually using problem.fun if loss is None: loss = problem.fun(best_x) From a36bde0f1c058b7c15963cfe99a98310ab2a5f12 Mon Sep 17 00:00:00 2001 From: gaurav Date: Tue, 3 Jun 2025 19:03:17 +0530 Subject: [PATCH 15/17] add docs --- docs/source/algorithms.md | 118 ++++++++++++++++++++++++++++++-------- 1 file changed, 94 insertions(+), 24 deletions(-) diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index 0d2b12b79..30450303a 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -4060,46 +4060,116 @@ these optimizers, you need to have **Optimizer Parameters:** - - **scale** (float): Scale of the search. + - **scale** (float): Scale of the search. + - Default: `1.0` - - **elitist** (bool): Whether to switch to elitist mode, i.e., `+` mode instead of `,` mode, where the best point in the population is always retained. - - **population_size** (Optional[int]): Population size. Should be + - **elitist** (bool): Whether to switch to elitist mode (μ+λ)-CMA-ES, where the best point in the population is always retained. + - Default: `False` - .. math:: + - **population_size** (Optional[int]): Population size. If None, it will be set to: - \text{population_size} = n \times \text{num_workers} \quad \text{for integer } n \geq 1 + .. math:: + \max\left(\text{num_workers}, 4 + \left\lfloor 3 \cdot \log(\text{dimension}) \right\rfloor\right) - Default is + - **diagonal** (bool): Use the diagonal version of CMA, which is more efficient for high-dimensional problems. + - Default: `False` - ..
math:: + - **high_speed** (bool): Use a metamodel for recommendation to speed up optimization. + - Default: `False` - \max\left( \text{num_workers},\ 4 + \left\lfloor 3 \cdot \log(\text{dimension}) \right\rfloor \right) + - **fast_cmaes** (bool): Use the fast CMA-ES implementation. + - Cannot be used with `diagonal=True`. + - Default: `False` - - **popsize_factor** (float): Factor used in the formula for computing the population size. - - Default is `3.0`. + - **random_init** (bool): If True, initialize the optimizer with random parameters. + - Default: `False` - - **diagonal** (bool): Use the diagonal version of CMA, which is advised for high-dimensional problems. + - **n_cores** (int): Number of cores to use for parallel function evaluation. + - Default: `1` - - **high_speed** (bool): Use a metamodel for recommendation to speed up optimization. + - **step_size_adaptive** (bool | str): Whether to adapt the step size. Can be a boolean or a string specifying the adaptation strategy. + - Default: `True` - - **use_fast_implementation** (bool): Use the fast CMA-ES implementation. - - Cannot be used with `diagonal=True`. - - Produces equivalent results and is preferable for high dimensions or when objective function evaluations are fast. + - **step_size_damping_factor** (float): Damping factor for step size adaptation. + - Default: `1.0` - - **stopping.maxfun** (int): The maximum number of criterion - evaluations. - - **stopping.maxiter** (int): The maximum number of iterations. - - **convergence_ftol_abs** (float): stopping criteria on the f tolerance. - - **convergence_ftol_rel** (float): relative stopping criteria on the f tolerance. - - **convergence_xtol_abs** (float): stopping criteria on the x tolerance. + - **step_size_damping_rate** (float): Damping rate for step size adaptation. + - Default: `0.1` - - **seed** (int): seed used by the internal random number generator. + - **step_size_update_squared** (bool): Whether to use squared step sizes in updates. + - Default: `False` + + - **learning_rate_cov_mat_update** (float): Learning rate for the covariance matrix update. + - Default: `1.0` - **learning_rate_rank_one_update** (float): Multiplier for the rank-one update learning rate of the covariance matrix. - - Default is `1.0`. + - Default: `1.0` - **learning_rate_rank_mu_update** (float): Multiplier for the rank-mu update learning rate of the covariance matrix. - - Default is `1.0`. + - Default: `1.0` + + - **learning_rate_mean_update** (float): Learning rate for the mean update. + - Default: `1.0` + + - **learning_rate_diagonal_update** (float): Learning rate for the diagonal update. + - Default: `0.0` + + - **num_parents** (Optional[int]): Number of parents (μ) for recombination. + - Default: `None` (automatically determined) + + - **negative_update** (bool): Whether to use negative updates for the covariance matrix. + - Default: `True` + + - **mirror_sampling_strategy** (int): Strategy for mirror sampling. + - `0`: Unconditional mirroring + - `1`: Selective mirroring + - `2`: Selective mirroring with delay (default) + + - **normalize_cov_trace** (bool | str): How to normalize the trace of the covariance matrix. 
+ - `False`: No normalization + - `True`: Normalize to 1 + - `"arithm"`: Arithmetic mean normalization + - `"geom"`: Geometric mean normalization + - `"aeig"`: Arithmetic mean of eigenvalues + - `"geig"`: Geometric mean of eigenvalues + - Default: `False` + + - **diag_covariance_iters** (int | bool): Number of iterations to use diagonal covariance matrix before switching to full matrix. + - If `False`, always use full matrix. + - Default: `False` + + - **stopping_maxfun** (int): Maximum number of function evaluations before termination. + - Default: `STOPPING_MAXFUN_GLOBAL` + + - **stopping_maxiter** (int): Maximum number of iterations before termination. + - Default: `STOPPING_MAXITER` + + - **stopping_timeout** (float): Maximum time in seconds before termination. + - Default: `float("inf")` + + - **stopping_cov_mat_cond** (float): Maximum condition number of the covariance matrix before termination. + - Default: `1e14` + + - **convergence_ftol_abs** (float): Absolute tolerance on function value changes for convergence. + - Default: `1e-11` + + - **convergence_ftol_rel** (float): Relative tolerance on function value changes for convergence. + - Default: `0.0` + + - **convergence_xtol_abs** (float): Absolute tolerance on parameter changes for convergence. + - Default: `1e-11` + + - **convergence_iter_noimprove** (Optional[int]): Number of iterations without improvement before termination. + - Default: `None` + + - **invariant_path** (bool): Whether evolution path (pc) should be invariant to transformations. + - Default: `False` + + - **eval_final_mean** (bool): Whether to evaluate the final mean solution. + - Default: `True` + + - **seed** (Optional[int]): Seed used by the internal random number generator for reproducibility. + - Default: `None` ``` From 5fe5f26ea6e3520ade5816c7313df2a1ec1b73d3 Mon Sep 17 00:00:00 2001 From: gaurav Date: Wed, 4 Jun 2025 00:39:25 +0530 Subject: [PATCH 16/17] add docs --- docs/source/algorithms.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index 30450303a..30a9a6fe3 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -4049,6 +4049,8 @@ these optimizers, you need to have "nevergrad_cmaes" + Minimize a scalar function using the Covariance Matrix Adaptation Evolution Strategy (CMA-ES) algorithm. + The Covariance Matrix Adaptation Evolution Strategy (CMA-ES) is a stochastic derivative-free numerical optimization algorithm for difficult (non-convex, ill-conditioned, multi-modal, rugged, noisy) optimization problems in continuous search spaces. The version available through nevergrad wraps an external implementation `pycma <https://github.com/CMA-ES/pycma>`_. @@ -4058,8 +4060,6 @@ these optimizers, you need to have The fast implementation relies on fcmaes which can be installed with `pip install fcmaes`. - **Optimizer Parameters:** - - **scale** (float): Scale of the search. From 209ab05eda2a343a58a9ffabef3236ebe2a1909c Mon Sep 17 00:00:00 2001 From: gaurav Date: Wed, 4 Jun 2025 00:48:16 +0530 Subject: [PATCH 17/17] handle infinite bounds --- src/optimagic/optimizers/nevergrad_optimizers.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py index 1c8fd20d3..696f69c53 100644 --- a/src/optimagic/optimizers/nevergrad_optimizers.py +++ b/src/optimagic/optimizers/nevergrad_optimizers.py @@ -316,7 +316,16 @@ def _nevergrad_internal( """ - param = ng.p.Array(init=x0).set_bounds(
- Default: `1.0` From 209ab05eda2a343a58a9ffabef3236ebe2a1909c Mon Sep 17 00:00:00 2001 From: gaurav Date: Wed, 4 Jun 2025 00:48:16 +0530 Subject: [PATCH 17/17] handle infinite bounds --- src/optimagic/optimizers/nevergrad_optimizers.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/optimagic/optimizers/nevergrad_optimizers.py b/src/optimagic/optimizers/nevergrad_optimizers.py index 1c8fd20d3..696f69c53 100644 --- a/src/optimagic/optimizers/nevergrad_optimizers.py +++ b/src/optimagic/optimizers/nevergrad_optimizers.py @@ -316,7 +316,16 @@ def _nevergrad_internal( """ - param = ng.p.Array(init=x0).set_bounds( + if not ( + problem.bounds.lower is not None + and problem.bounds.upper is not None + and np.all(np.isfinite(problem.bounds.lower)) + and np.all(np.isfinite(problem.bounds.upper)) + ): + raise ValueError("Bounds cannot be None or infinite.") + + param = ng.p.Array( + init=x0, lower=problem.bounds.lower, upper=problem.bounds.upper, ) @@ -344,7 +353,7 @@ def _nevergrad_internal( best_x = recommendation.value[0][0] loss = recommendation.loss - # in some cases, loss is not provided by the optimizer, so we need to + # in some cases, loss is not provided by the optimizer, in that case, # evaluate it manually using problem.fun if loss is None: loss = problem.fun(best_x)