diff --git a/binminpy/BinMinBottomUp.py b/binminpy/BinMinBottomUp.py
index ac1c047..ecdee90 100644
--- a/binminpy/BinMinBottomUp.py
+++ b/binminpy/BinMinBottomUp.py
@@ -25,34 +25,36 @@ class BinMinBottomUp(BinMinBase):
     RESULT_TAG = 2
     TERMINATE_TAG = 3
 
-    def __init__(self, target_function, binning_tuples, args=(), 
-                 guide_function=None, bin_check_function=None, 
+    def __init__(self, target_function, binning_tuples, args=(),
+                 guide_function=None, bin_check_function=None,
                  callback=None, callback_on_rank_0=True,
-                 sampler="latinhypercube", 
+                 sampler="latinhypercube",
                  optimizer="minimize", optimizer_kwargs={},
-                 sampled_parameters=(), 
+                 sampled_parameters=(),
                  set_eval_points=None, set_eval_points_on_rank_0=True,
                  initial_optimizer="minimize", n_initial_points=10,
-                 initial_optimizer_kwargs={}, 
+                 initial_optimizer_kwargs={},
                  n_sampler_points_per_bin=10,
                  inherit_best_init_point_within_bin=False,
                  accept_target_below=np.inf, accept_delta_target_below=np.inf,
                  accept_guide_below=np.inf, accept_delta_guide_below=np.inf,
-                 save_evals=False, return_evals=False, 
-                 return_bin_results=True, return_bin_centers=True, 
+                 save_evals=False, return_evals=False,
+                 return_bin_results=True, return_bin_centers=True,
                  optima_comparison_rtol=1e-9, optima_comparison_atol=0.0,
                  neighborhood_distance=1,
-                 n_optim_restarts_per_bin=1, n_tasks_per_batch=1, 
+                 n_optim_restarts_per_bin=1, n_tasks_per_batch=1,
                  print_progress_every_n_batch=100,
                  max_tasks_per_worker=np.inf, max_n_bins=np.inf,
                  max_tasks_in_memory=np.inf, task_dump_file=None,
                  skip_initial_optimization=False,
                  initial_points=None,
+                 comm=None,
                  ):
         """Constructor."""
         self.print_prefix = "BinMinBottomUp:"
 
-        comm = MPI.COMM_WORLD
+        self.comm = comm if comm is not None else MPI.COMM_WORLD
+        comm = self.comm
         rank = comm.Get_rank()
         size = comm.Get_size()
         self.n_workers = size - 1
@@ -729,7 +731,7 @@ def run(self):
         global _y_points_per_rank
         global _g_points_per_rank
 
-        comm = MPI.COMM_WORLD
+        comm = self.comm
         rank = comm.Get_rank()
         size = comm.Get_size()
 
diff --git a/binminpy/BinMinMPI.py b/binminpy/BinMinMPI.py
index 4eab159..ddbca37 100644
--- a/binminpy/BinMinMPI.py
+++ b/binminpy/BinMinMPI.py
@@ -10,13 +10,14 @@ class BinMinMPI(BinMin):
 
-    def __init__(self, target_function, binning_tuples, optimizer="minimize", optimizer_kwargs={}, 
+    def __init__(self, target_function, binning_tuples, optimizer="minimize", optimizer_kwargs={},
                  return_evals=False, return_bin_results=True, return_bin_centers=True,
                  optima_comparison_rtol=1e-9, optima_comparison_atol=0.0,
-                 n_restarts_per_bin=1, task_distribution="even", n_tasks_per_batch=1, max_tasks_per_worker=np.inf, 
-                 bin_masking=None, options={}):
+                 n_restarts_per_bin=1, task_distribution="even", n_tasks_per_batch=1, max_tasks_per_worker=np.inf,
+                 bin_masking=None, options={}, comm=None):
         """Constructor."""
 
-        comm = MPI.COMM_WORLD
+        self.comm = comm if comm is not None else MPI.COMM_WORLD
+        comm = self.comm
         rank = comm.Get_rank()
         size = comm.Get_size()
 
@@ -69,7 +70,7 @@ def run_even_task_distribution(self):
         """
         """Distribute the optimization tasks via MPI and collect results on rank 0."""
 
-        comm = MPI.COMM_WORLD
+        comm = self.comm
         rank = comm.Get_rank()
         size = comm.Get_size()
@@ -173,7 +174,7 @@ def run_dynamic_task_distribution(self):
            On rank 0: a dictionary containing global optimization results.
            On other ranks: None.
        """
 
-        comm = MPI.COMM_WORLD
+        comm = self.comm
        rank = comm.Get_rank()
        size = comm.Get_size()
@@ -316,7 +317,7 @@ def run_mcmc_task_distribution(self):
        """
        from scipy.stats.qmc import LatinHypercube
 
-        comm = MPI.COMM_WORLD
+        comm = self.comm
        rank = comm.Get_rank()
        size = comm.Get_size()
 
diff --git a/binminpy/interface_functions.py b/binminpy/interface_functions.py
index b0c5950..6f02101 100644
--- a/binminpy/interface_functions.py
+++ b/binminpy/interface_functions.py
@@ -2,10 +2,10 @@
 
 # Helper functions
 
-def _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results, 
-                   return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+def _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
+                   return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                    n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                   options, n_tasks_per_batch, max_tasks_per_worker, bin_masking):
+                   options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm=None):
     """Helper function to start the optimizer with the requested parallelization. """
 
     # Check the parallelization argument.
@@ -91,6 +91,7 @@ def _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_eval
             n_tasks_per_batch=n_tasks_per_batch,
             max_tasks_per_worker=max_tasks_per_worker,
             bin_masking=bin_masking,
+            comm=comm,
         )
         output = binned_opt.run()
         return output
@@ -103,10 +104,10 @@
 
 # Below is a collection of functions to allow using binminpy
 # through an interface similar to scipy.optimize.
 
-def minimize(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def minimize(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
              optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
             n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-             options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+             options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Do binned optimization with scipy.optimize.minimize.
@@ -117,16 +118,16 @@ def minimize(fun, binning_tuples, return_evals=False, return_bin_results=True, r
     optimizer_kwargs = dict(kwargs)
 
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def differential_evolution(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def differential_evolution(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
                            optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
                            n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-                           options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+                           options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Do binned optimization with scipy.optimize.differential_evolution as the optimizer.
 
     See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.differential_evolution.html#scipy.optimize.differential_evolution
@@ -136,16 +137,16 @@ def differential_evolution(fun, binning_tuples, return_evals=False, return_bin_r
     optimizer_kwargs = dict(kwargs)
 
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def basinhopping(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def basinhopping(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
                 optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
                 n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-                 options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+                 options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Do binned optimization with scipy.optimize.basinhopping as the optimizer.
 
     See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.basinhopping.html#scipy.optimize.basinhopping
@@ -155,18 +156,18 @@ def basinhopping(fun, binning_tuples, return_evals=False, return_bin_results=Tru
     optimizer_kwargs = dict(kwargs)
 
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def shgo(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def shgo(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
         optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
         n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-         options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+         options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Do binned optimization with scipy.optimize.shgo as the optimizer.
-    
+
     See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.shgo.html#scipy.optimize.shgo
     """
@@ -174,16 +175,16 @@ def shgo(fun, binning_tuples, return_evals=False, return_bin_results=True, retur
     optimizer_kwargs = dict(kwargs)
 
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def dual_annealing(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
-                   optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2, 
+def dual_annealing(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
+                   optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
                    n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-                   options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+                   options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Do binned optimization with scipy.optimize.dual_annealing as the optimizer.
 
     See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.dual_annealing.html#scipy.optimize.dual_annealing
@@ -193,16 +194,16 @@ def dual_annealing(fun, binning_tuples, return_evals=False, return_bin_results=T
     optimizer_kwargs = dict(kwargs)
 
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def direct(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def direct(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
           optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
           n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-           options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+           options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Do binned optimization with scipy.optimize.direct as the optimizer.
 
     See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.direct.html#scipy.optimize.direct
@@ -210,18 +211,18 @@ def direct(fun, binning_tuples, return_evals=False, return_bin_results=True, ret
     optimizer = "direct"
     optimizer_kwargs = dict(kwargs)
-    
+
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def iminuit(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def iminuit(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
            optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
           n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-            options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+            options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Do binned optimization with iminuit.minimize as the optimizer.
 
     See https://scikit-hep.org/iminuit/reference.html#scipy-like-interface
@@ -229,18 +230,18 @@ def iminuit(fun, binning_tuples, return_evals=False, return_bin_results=True, re
     optimizer = "iminuit"
     optimizer_kwargs = dict(kwargs)
-    
+
    return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def diver(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def diver(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
          optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
          n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-          options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+          options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Do binned optimization with diver as the optimizer.
 
     See https://github.com/diveropt/Diver
@@ -248,58 +249,58 @@ def diver(fun, binning_tuples, return_evals=False, return_bin_results=True, retu
     optimizer = "diver"
     optimizer_kwargs = dict(kwargs)
-    
+
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def bincenter(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def bincenter(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
              optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
              n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-              options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+              options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Simply evaluate the target function at the center of each bin."""
     optimizer = "bincenter"
     optimizer_kwargs = dict(kwargs)
-    
+
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def random(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def random(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
           optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
           n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-           options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+           options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
-    """Simply evaluate the target function at the center of each bin."""
+    """Evaluate the target function at randomly sampled points within each bin."""
     optimizer = "random"
     optimizer_kwargs = dict(kwargs)
-    
+
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
 
 
-def latinhypercube(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True, 
+def latinhypercube(fun, binning_tuples, return_evals=False, return_bin_results=True, return_bin_centers=True,
                   optima_comparison_rtol=1e-6, optima_comparison_atol=1e-2,
                   n_restarts_per_bin=1, parallelization=None, max_processes=1, task_distribution="even",
-                   options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, **kwargs):
+                   options={}, n_tasks_per_batch=1, max_tasks_per_worker=np.inf, bin_masking=None, comm=None, **kwargs):
     """Use latin hypercube sampling to evaluate the target at a fixed number of
     points within each bin.
     """
     optimizer = "latinhypercube"
     optimizer_kwargs = dict(kwargs)
-    
+
     return _run_optimizer(fun, binning_tuples, optimizer, optimizer_kwargs, return_evals, return_bin_results,
-                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol, 
+                          return_bin_centers, optima_comparison_rtol, optima_comparison_atol,
                           n_restarts_per_bin, parallelization, max_processes, task_distribution,
-                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking)
+                          options, n_tasks_per_batch, max_tasks_per_worker, bin_masking, comm)
diff --git a/tests/test_custom_comm.py b/tests/test_custom_comm.py
new file mode 100644
index 0000000..b3ea0df
--- /dev/null
+++ b/tests/test_custom_comm.py
@@ -0,0 +1,73 @@
+"""Test that custom MPI communicators work correctly."""
+import numpy as np
+from mpi4py import MPI
+from binminpy.BinMinMPI import BinMinMPI
+
+def target_function(x):
+    """Simple quadratic function."""
+    return np.sum(x**2)
+
+if __name__ == "__main__":
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+    size = comm.Get_size()
+
+    # Test 1: Pass COMM_WORLD explicitly (should work same as default)
+    if rank == 0:
+        print(f"Test 1: Using explicit COMM_WORLD with {size} processes")
+
+    binning_tuples = [(-5.0, 5.0, 3), (-5.0, 5.0, 3)]
+
+    binned_opt = BinMinMPI(
+        target_function,
+        binning_tuples,
+        optimizer="minimize",
+        optimizer_kwargs={"method": "L-BFGS-B"},
+        return_bin_results=True,
+        comm=comm,  # Explicit communicator
+    )
+    result = binned_opt.run()
+
+    if rank == 0:
+        print(f"  Global optimum found: x={result['x_optimal'][0]}, y={result['y_optimal'][0]}")
+        print(f"  Bins evaluated: {len(result['bin_tuples'])}")
+        assert abs(result['y_optimal'][0]) < 1e-6, "Expected minimum near 0"
+        print("  PASSED")
+
+    comm.Barrier()
+
+    # Test 2: Create a subcommunicator and use it
+    if size >= 2:
+        # Split into two groups: even and odd ranks
+        color = rank % 2
+        subcomm = comm.Split(color=color, key=rank)
+        subrank = subcomm.Get_rank()
+        subsize = subcomm.Get_size()
+
+        if rank == 0:
+            print(f"\nTest 2: Using subcommunicator (even ranks only, {subsize} processes)")
+
+        # Only even ranks run this
+        if color == 0:
+            binned_opt2 = BinMinMPI(
+                target_function,
+                binning_tuples,
+                optimizer="minimize",
+                optimizer_kwargs={"method": "L-BFGS-B"},
+                return_bin_results=True,
+                comm=subcomm,  # Use subcommunicator
+            )
+            result2 = binned_opt2.run()
+
+            if subrank == 0:
+                print(f"  Global optimum found: x={result2['x_optimal'][0]}, y={result2['y_optimal'][0]}")
+                print(f"  Bins evaluated: {len(result2['bin_tuples'])}")
+                assert abs(result2['y_optimal'][0]) < 1e-6, "Expected minimum near 0"
+                print("  PASSED")
+
+        subcomm.Free()
+
+    comm.Barrier()
+
+    if rank == 0:
+        print("\nAll tests passed!")
diff --git a/tests/test_custom_comm_bottomup.py b/tests/test_custom_comm_bottomup.py
new file mode 100644
index 0000000..7ecda2f
--- /dev/null
+++ b/tests/test_custom_comm_bottomup.py
@@ -0,0 +1,54 @@
+"""Test that custom MPI communicators work with BinMinBottomUp."""
+import numpy as np
+from mpi4py import MPI
+from binminpy.BinMinBottomUp import BinMinBottomUp
+
+def target_function(x, *args):
+    """Simple quadratic function."""
+    return np.sum(x**2)
+
+if __name__ == "__main__":
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+    size = comm.Get_size()
+
+    if size < 2:
+        if rank == 0:
+            print("Need at least 2 MPI processes for BinMinBottomUp test")
+        exit(0)
+
+    # Test: Pass COMM_WORLD explicitly
+    if rank == 0:
+        print(f"Test: BinMinBottomUp with explicit COMM_WORLD ({size} processes)")
+
+    binning_tuples = [[-5, 5, 10], [-5, 5, 10]]
+
+    binned_opt = BinMinBottomUp(
+        target_function,
+        binning_tuples,
+        args=(),
+        sampler="latinhypercube",
+        optimizer="minimize",
+        optimizer_kwargs={"method": "L-BFGS-B", "tol": 1e-3},
+        sampled_parameters=(0, 1),
+        n_initial_points=20,
+        n_sampler_points_per_bin=5,
+        save_evals=False,
+        return_evals=False,
+        return_bin_centers=False,
+        n_tasks_per_batch=5,
+        max_n_bins=20,
+        comm=comm,  # Explicit communicator
+    )
+    result = binned_opt.run()
+
+    if rank == 0:
+        print(f"  Global optimum found: x={result['x_optimal'][0]}, y={result['y_optimal'][0]}")
+        print(f"  Bins evaluated: {len(result['bin_tuples'])}")
+        assert result['y_optimal'][0] < 1.0, "Expected minimum reasonably close to 0"
+        print("  PASSED")
+
+    comm.Barrier()
+
+    if rank == 0:
+        print("\nAll tests passed!")
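
A minimal usage sketch of the new `comm` argument through the scipy-like interface in interface_functions.py, complementing the tests above (which exercise the classes directly). This is a sketch under assumptions: it assumes `parallelization="mpi"` is the value that makes `_run_optimizer` construct the `BinMinMPI` backend (the construction is visible in the hunk above, but the accepted values of `parallelization` are not shown in this patch), and the example file name is hypothetical. Run with e.g. `mpiexec -n 4 python example_subcomm.py`:

"""Sketch: binned optimization on a subcommunicator via binminpy's scipy-like interface."""
import numpy as np
from mpi4py import MPI
from binminpy.interface_functions import minimize

def target_function(x):
    # Quadratic bowl with its minimum at the origin.
    return np.sum(x**2)

if __name__ == "__main__":
    world = MPI.COMM_WORLD
    # Split the world into two independent groups (even and odd ranks).
    color = world.Get_rank() % 2
    subcomm = world.Split(color=color, key=world.Get_rank())

    if color == 0:
        # Only the even-rank group runs this job; the odd-rank group is free
        # to run something else on its own communicator at the same time.
        result = minimize(
            target_function,
            [(-5.0, 5.0, 3), (-5.0, 5.0, 3)],
            parallelization="mpi",  # assumed value selecting the BinMinMPI backend
            comm=subcomm,           # the new keyword argument added in this patch
            method="L-BFGS-B",      # forwarded to scipy.optimize.minimize via **kwargs
        )
        # As in the tests above, results are collected on rank 0 of the
        # communicator that ran the job.
        if subcomm.Get_rank() == 0:
            print("optimum:", result["x_optimal"][0], result["y_optimal"][0])

    subcomm.Free()

The point of the design change this illustrates: because the communicator is now injected rather than hard-coded to MPI.COMM_WORLD, disjoint groups of ranks can run independent binned optimizations concurrently without their point-to-point messages or collectives interfering.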