diff --git a/boa/config/__main__.py b/boa/config/__main__.py
index b3ca10a..122b5d5 100644
--- a/boa/config/__main__.py
+++ b/boa/config/__main__.py
@@ -1,13 +1,17 @@
-import pathlib
+# TODO: Move this to a docs only feature and move back to PYYAML
+import pathlib  # pragma: no cover

-import click
-from ruamel.yaml import YAML
-from ruamel.yaml.compat import StringIO
+import click  # pragma: no cover
+from ruamel.yaml import YAML  # pragma: no cover
+from ruamel.yaml.compat import StringIO  # pragma: no cover

-from boa.config.config import add_comment_recurse, generate_default_doc_config
+from boa.config.config import (  # pragma: no cover
+    add_comment_recurse,
+    generate_default_doc_config,
+)


-class YAMLDumper(YAML):
+class YAMLDumper(YAML):  # pragma: no cover
     def dump(self, data, stream=None, **kw):
         inefficient = False
         if stream is None:
@@ -18,14 +22,14 @@ def dump(self, data, stream=None, **kw):
         return stream.getvalue()


-@click.command()
-@click.option(
+@click.command()  # pragma: no cover
+@click.option(  # pragma: no cover
     "--output-path",
     "-o",
     type=click.Path(exists=False, file_okay=True, dir_okay=False, path_type=pathlib.Path),
     default="default_config.yaml",
 )
-def main(output_path):
+def main(output_path):  # pragma: no cover
     """Generate a default config file with comments."""
     d, c = generate_default_doc_config()
     yaml = YAML()
@@ -35,5 +39,5 @@ def main(output_path):
         yaml.dump(data, f)


-if __name__ == "__main__":
+if __name__ == "__main__":  # pragma: no cover
     main()
diff --git a/boa/config/config.py b/boa/config/config.py
index da63ec7..ccf26ff 100644
--- a/boa/config/config.py
+++ b/boa/config/config.py
@@ -43,7 +43,7 @@
 NL = "\n"


-def strip_white_space(s: str, strip_all=True):
+def strip_white_space(s: str, strip_all=True):  # pragma: no cover  # Used in docs
     if not s:
         return s
     if strip_all:
@@ -61,7 +61,7 @@ def strip_white_space(s: str, strip_all=True):
 class _Utils:
     _filtered_dict_fields: ClassVar[list[str]] = None

-    def to_dict(self) -> dict:
+    def to_dict(self) -> dict:  # pragma: no cover  # Used in docs
         def vs(inst, attrib, val):
             if is_dataclass(val):
                 return dc_asdict(val)
@@ -180,6 +180,18 @@ class BOAMetric(_Utils):
             wrapper functions."""
         },
     )
+    metric_func_kwargs: Optional[dict] = field(
+        default=None,
+        metadata={
+            "doc": """Additional keyword arguments to be passed to the metric function.
+            This is useful when you are setting up a metric and only want to pass the
+            metric function additional arguments.
+            Example: passing `metric_func_kwargs={"squared": False}` to sklearn's mean_squared_error
+            to get the root mean squared error instead of the mean squared error
+            (though BOA already has :class:`RMSE <.RMSE>`, built on sklearn, available if needed).
+            """
+        },
+    )

     def __init__(self, *args, lower_is_better: Optional[bool] = None, **kwargs):
         if lower_is_better is not None:
@@ -764,7 +776,7 @@ def boa_params_to_wpr(params: list[dict], mapping, from_trial=True):
     return new_params


-def generate_default_doc_config():
+def generate_default_doc_config():  # pragma: no cover

     config = BOAConfig(
         **{
@@ -806,7 +818,9 @@ def set_metadata_default_doc_recurse(d: dict, config):
     return d, config


-def add_comment_recurse(d: ruamel.yaml.comments.CommentedMap, config=None, where="before", depth=0, indent=2):
+def add_comment_recurse(
+    d: ruamel.yaml.comments.CommentedMap, config=None, where="before", depth=0, indent=2
+):  # pragma: no cover
     fields = fields_dict(type(config)) if attr.has(config) else {}
     if isinstance(d, dict):
         for key in d:
@@ -835,7 +849,7 @@ def add_comment_recurse(d: ruamel.yaml.comments.CommentedMap, config=None, where
     return d


-if __name__ == "__main__":
+if __name__ == "__main__":  # pragma: no cover
     from tests.conftest import TEST_CONFIG_DIR

     c = BOAConfig.from_jsonlike(pathlib.Path(TEST_CONFIG_DIR / "test_config_generic.yaml"))
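The new `metric_func_kwargs` field is easiest to see in config form. A minimal YAML sketch with illustrative values; it mirrors the NRMSE unit test added later in this diff, which passes a `normalizer` kwarg through to `normalized_root_mean_squared_error`:

```yaml
objective:
  metrics:
    - name: nrmse
      metric: NRMSE
      # forwarded verbatim to the underlying metric function
      metric_func_kwargs:
        normalizer: iqr
```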
+ """ + }, + ) def __init__(self, *args, lower_is_better: Optional[bool] = None, **kwargs): if lower_is_better is not None: @@ -764,7 +776,7 @@ def boa_params_to_wpr(params: list[dict], mapping, from_trial=True): return new_params -def generate_default_doc_config(): +def generate_default_doc_config(): # pragma: no cover config = BOAConfig( **{ @@ -806,7 +818,9 @@ def set_metadata_default_doc_recurse(d: dict, config): return d, config -def add_comment_recurse(d: ruamel.yaml.comments.CommentedMap, config=None, where="before", depth=0, indent=2): +def add_comment_recurse( + d: ruamel.yaml.comments.CommentedMap, config=None, where="before", depth=0, indent=2 +): # pragma: no cover fields = fields_dict(type(config)) if attr.has(config) else {} if isinstance(d, dict): for key in d: @@ -835,7 +849,7 @@ def add_comment_recurse(d: ruamel.yaml.comments.CommentedMap, config=None, where return d -if __name__ == "__main__": +if __name__ == "__main__": # pragma: no cover from tests.conftest import TEST_CONFIG_DIR c = BOAConfig.from_jsonlike(pathlib.Path(TEST_CONFIG_DIR / "test_config_generic.yaml")) diff --git a/boa/metrics/metrics.py b/boa/metrics/metrics.py index 0733008..a14b289 100644 --- a/boa/metrics/metrics.py +++ b/boa/metrics/metrics.py @@ -199,12 +199,14 @@ class RootMeanSquaredError(SklearnMetric): def __init__( self, lower_is_better=True, - metric_func_kwargs=(("squared", False),), + metric_func_kwargs=None, *args, **kwargs, ): - if metric_func_kwargs == (("squared", False),): - metric_func_kwargs = dict((y, x) for x, y in metric_func_kwargs) + if isinstance(metric_func_kwargs, dict): + metric_func_kwargs.update({"squared": False}) + else: + metric_func_kwargs = {"squared": False} super().__init__( lower_is_better=lower_is_better, metric_func_kwargs=metric_func_kwargs, @@ -297,15 +299,18 @@ def __init__(self, lower_is_better=True, *args, **kwargs): def get_metric_from_config(config: BOAMetric, instantiate=True, **kwargs) -> ModularMetric: kwargs["lower_is_better"] = config.minimize kwargs["metric_name"] = config.metric + kw = {**config.to_dict(), **kwargs} + if kw.get("metric_func_kwargs") is None: + kw.pop("metric_func_kwargs") if config.metric_type == MetricType.METRIC or config.metric_type == MetricType.BOA_METRIC: - metric = get_metric_by_class_name(instantiate=instantiate, **config.to_dict(), **kwargs) + metric = get_metric_by_class_name(instantiate=instantiate, **kw) elif config.metric_type == MetricType.SKLEARN_METRIC: kwargs["sklearn_"] = True - metric = get_metric_by_class_name(instantiate=instantiate, **config.to_dict(), **kwargs) + metric = get_metric_by_class_name(instantiate=instantiate, **kw) elif config.metric_type == MetricType.SYNTHETIC_METRIC: - metric = setup_synthetic_metric(instantiate=instantiate, **config.to_dict(), **kwargs) + metric = setup_synthetic_metric(instantiate=instantiate, **kw) elif config.metric_type == MetricType.PASSTHROUGH: # only name but no metric type - metric = PassThroughMetric(**config.to_dict(), **kwargs) + metric = PassThroughMetric(**kw) else: # TODO link to docs for configuration when it exists raise KeyError("No valid configuration for metric found.") diff --git a/boa/metrics/modular_metric.py b/boa/metrics/modular_metric.py index 4fc6067..bcc8fc6 100644 --- a/boa/metrics/modular_metric.py +++ b/boa/metrics/modular_metric.py @@ -197,10 +197,11 @@ def fetch_trial_data(self, trial: BaseTrial, **kwargs): def _evaluate(self, params: TParameterization, **kwargs) -> float: kwargs.update(params.pop("kwargs")) - return 
diff --git a/boa/scripts/moo.py b/boa/scripts/moo.py
index 2828e44..2918e23 100644
--- a/boa/scripts/moo.py
+++ b/boa/scripts/moo.py
@@ -1,3 +1,4 @@
+import tempfile
 from pathlib import Path

 import torch
@@ -30,11 +31,13 @@ def fetch_trial_data(self, trial, metric_properties, metric_name, *args, **kwargs):


 def main():
-    config_path = Path(__file__).resolve().parent / "moo.yaml"
-    wrapper = Wrapper(config_path=config_path)
-    controller = Controller(wrapper=wrapper)
-    controller.initialize_scheduler()
-    return controller.run()
+    with tempfile.TemporaryDirectory() as temp_dir:
+        experiment_dir = Path(temp_dir)
+        config_path = Path(__file__).resolve().parent / "moo.yaml"
+        wrapper = Wrapper(config_path=config_path, experiment_dir=experiment_dir)
+        controller = Controller(wrapper=wrapper)
+        controller.initialize_scheduler()
+        return controller.run()


 if __name__ == "__main__":
diff --git a/boa/scripts/moo.yaml b/boa/scripts/moo.yaml
index c16f243..3d31a64 100644
--- a/boa/scripts/moo.yaml
+++ b/boa/scripts/moo.yaml
@@ -1,20 +1,18 @@
 # MultiObjective Optimization config
-optimization_options:
-  objective_options:
-    objective_thresholds:
-      - branin >= -18.0
-      - currin >= -6.0
-    objectives:
-      - name: branin
-        lower_is_better: False
-        noise_sd: 0
-      - name: currin
-        lower_is_better: False
-        noise_sd: 0
+objective:
+  objective_thresholds:
+    - branin >= -18.0
+    - currin >= -6.0
+  metrics:
+    - name: branin
+      lower_is_better: False
+      noise_sd: 0
+    - name: currin
+      lower_is_better: False
+      noise_sd: 0

-  experiment:
-    name: "moo_run"
-    trials: 50
+scheduler:
+  n_trials: 30

 parameters:
   x0:
@@ -24,4 +22,7 @@ parameters:
   x1:
     type: range
     bounds: [0, 1]
-    value_type: float
\ No newline at end of file
+    value_type: float
+
+script_options:
+  exp_name: "moo_run"
diff --git a/boa/scripts/run_branin.py b/boa/scripts/run_branin.py
index 58f0616..83c8707 100644
--- a/boa/scripts/run_branin.py
+++ b/boa/scripts/run_branin.py
@@ -1,5 +1,6 @@
 import logging
 import shutil
+import tempfile
 import time
 from pathlib import Path

@@ -21,9 +22,9 @@


 def main():
-    # with tempfile.TemporaryDirectory() as exp_dir:
-    exp_dir = "."
-    return run_opt(exp_dir)
+    with tempfile.TemporaryDirectory() as temp_dir:
+        experiment_dir = Path(temp_dir)
+        return run_opt(exp_dir=experiment_dir)


 def run_opt(exp_dir):
diff --git a/boa/scripts/synth_func_config.yaml b/boa/scripts/synth_func_config.yaml
index 048ed05..1dc63a4 100644
--- a/boa/scripts/synth_func_config.yaml
+++ b/boa/scripts/synth_func_config.yaml
@@ -1,19 +1,17 @@
-optimization_options:
-  objective_options: # can also use the key moo
-    objectives:
-      - name: rmse
-        metric: RMSE
-        noise_sd: .1
+objective: # can also use the key moo
+  metrics:
+    - name: rmse
+      metric: RMSE
+      noise_sd: .1

-  generation_strategy:
-    steps:
-      - model: SOBOL
-        num_trials: 5
-      - model: GPEI
-        num_trials: -1
-  scheduler:
-    total_trials: 20
-    on_reload:
+generation_strategy:
+  steps:
+    - model: SOBOL
+      num_trials: 5
+    - model: GPEI
+      num_trials: -1
+scheduler:
+  total_trials: 20

 parameters:
   x0:
@@ -25,10 +23,12 @@
     'type': 'range'
     'value_type': 'float'

+# options only needed by the model and not BOA
+# You can put anything here that your model might need
 model_options:
   input_size: 15

 script_options:
   wrapper_path: ./script_wrappers.py
   wrapper_name: Wrapper
-  append_timestamp: True
\ No newline at end of file
+  append_timestamp: True
diff --git a/boa/template.py b/boa/template.py
new file mode 100644
index 0000000..d1fd63e
--- /dev/null
+++ b/boa/template.py
@@ -0,0 +1,6 @@
+import jinja2
+
+
+def render_template(template_name, **kwargs):
+    template = jinja2.Environment(loader=jinja2.FileSystemLoader("templates")).get_template(template_name)
+    return template.render(**kwargs)
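`boa.template.render_template` loads templates from a `templates/` directory resolved against the current working directory, so callers need to run from wherever that directory lives. A hypothetical usage sketch (`greeting.txt.j2` is an assumed template name, not part of this change):

```python
from boa.template import render_template

# assumes ./templates/greeting.txt.j2 exists and contains "Hello {{ name }}"
print(render_template("greeting.txt.j2", name="BOA"))
```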
diff --git a/environment.yml b/environment.yml
index 817c3fe..c0f8f88 100644
--- a/environment.yml
+++ b/environment.yml
@@ -21,4 +21,4 @@ dependencies:
 - ax-platform==0.3.3
 - ruamel.yaml
 - attrs
-
+- jinja2
diff --git a/environment_dev.yml b/environment_dev.yml
index 6447eb3..bfe7d3f 100644
--- a/environment_dev.yml
+++ b/environment_dev.yml
@@ -23,6 +23,7 @@ dependencies:
 - ruamel.yaml
 - domdfcoding::attr_utils
 - attrs
+- jinja2

 ## Jupyter and sphinx jupyter
 - myst-nb
diff --git a/tests/1unit_tests/test_config_deprecation_normalization.py b/tests/1unit_tests/test_config_deprecation_normalization.py
index 536da12..41f5683 100644
--- a/tests/1unit_tests/test_config_deprecation_normalization.py
+++ b/tests/1unit_tests/test_config_deprecation_normalization.py
@@ -1,15 +1,19 @@
+import pytest
+
 from boa import BOAConfig


-def test_config_deprecation_normalization(
-    synth_config,
-    metric_config,
-    gen_strat1_config,
-    soo_config,
-    moo_config,
-    pass_through_config,
-    scripts_moo,
-    scripts_synth_func,
-):
-    for config in [synth_config, metric_config, gen_strat1_config, soo_config, moo_config, pass_through_config]:
-        assert isinstance(config, BOAConfig)
+@pytest.mark.parametrize(
+    "config",
+    [
+        "synth_config_deprecated",
+        "metric_config_deprecated",
+        "gen_strat1_config_deprecated",
+        "soo_config_deprecated",
+        "moo_config_deprecated",
+        "pass_through_config_deprecated",
+    ],  # pass fixture names as strings; resolved below via request.getfixturevalue
+)
+def test_config_deprecation_normalization(config, request):
+    config = request.getfixturevalue(config)
+    assert isinstance(config, BOAConfig)
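The parametrization pattern used above deserves a quick gloss: pytest cannot parametrize with fixtures directly, so the test takes fixture names as strings and resolves them at runtime. A self-contained sketch of the same pattern, with a hypothetical fixture:

```python
import pytest


@pytest.fixture
def answer():
    return 42


@pytest.mark.parametrize("fixture_name", ["answer"])
def test_resolves_fixture_by_name(fixture_name, request):
    # request.getfixturevalue looks up the fixture by name at runtime
    assert request.getfixturevalue(fixture_name) == 42
```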
diff --git a/tests/1unit_tests/test_generation_strategy.py b/tests/1unit_tests/test_generation_strategy.py
index 63cd0ef..489a0b7 100644
--- a/tests/1unit_tests/test_generation_strategy.py
+++ b/tests/1unit_tests/test_generation_strategy.py
@@ -22,8 +22,8 @@ def test_gen_steps_from_config(gen_strat1_config):
     assert gs1 == gs2


-def test_auto_gen_use_saasbo(saasbo_config):
-    controller = Controller(config=saasbo_config, wrapper=ScriptWrapper(config=saasbo_config))
+def test_auto_gen_use_saasbo(saasbo_config, tmp_path):
+    controller = Controller(config=saasbo_config, wrapper=ScriptWrapper(config=saasbo_config, experiment_dir=tmp_path))
     exp = get_experiment(
         config=controller.config, runner=WrappedJobRunner(wrapper=controller.wrapper), wrapper=controller.wrapper
     )
diff --git a/tests/1unit_tests/test_metrics.py b/tests/1unit_tests/test_metrics.py
index adcbcd0..943cedf 100644
--- a/tests/1unit_tests/test_metrics.py
+++ b/tests/1unit_tests/test_metrics.py
@@ -4,9 +4,11 @@

 from boa import (
     BaseWrapper,
+    BOAMetric,
     Controller,
     get_metric_by_class_name,
     get_metric_from_config,
+    setup_sklearn_metric,
     setup_synthetic_metric,
 )
@@ -57,23 +59,31 @@ def test_load_metric_by_name():
     assert metric_synth.name == "something"
     assert metric_synth.metric_to_eval.name == "FromBotorch_Hartmann4"

-    metric_sklearn = get_metric_by_class_name("MSE")
-    assert metric_sklearn.name == "MSE"
-    assert metric_sklearn.metric_to_eval.__name__ == "mean_squared_error"
+    metric_boa = get_metric_by_class_name("MSE")
+    assert metric_boa.name == "MSE"
+    assert metric_boa.metric_to_eval.__name__ == "mean_squared_error"

-    metric_sklearn = get_metric_by_class_name("MSE", name="something")
+    metric_boa = get_metric_by_class_name("MSE", name="something")
+    assert metric_boa.name == "something"
+    assert metric_boa.metric_to_eval.__name__ == "mean_squared_error"
+
+    metric_sklearn = setup_sklearn_metric("median_absolute_error")
+    assert metric_sklearn.name == "median_absolute_error"
+    assert metric_sklearn.metric_to_eval.__name__ == "median_absolute_error"
+
+    metric_sklearn = setup_sklearn_metric("median_absolute_error", name="something")
     assert metric_sklearn.name == "something"
-    assert metric_sklearn.metric_to_eval.__name__ == "mean_squared_error"
+    assert metric_sklearn.metric_to_eval.__name__ == "median_absolute_error"


-def test_load_metric_from_config(synth_config, metric_config):
+def test_load_metric_from_config(synth_config, generic_config):
     metrics = synth_config.objective.metrics
     for metric_c in metrics:
         metric = get_metric_from_config(metric_c)
         assert metric.name == "Hartmann4"
         assert metric.metric_to_eval.name == "FromBotorch_Hartmann4"

-    metrics = metric_config.objective.metrics
+    metrics = generic_config.objective.metrics
     for metric_c in metrics:
         if not metric_c.info_only:
             metric = get_metric_from_config(metric_c)
@@ -149,8 +159,8 @@ def test_metric_fetch_trial_data_works_with_wrapper_fetch_trial_data_single_and_
     prev_f_ret = f_ret


-def test_can_create_info_only_metrics(metric_config, tmp_path):
-    controller = Controller(config=metric_config, wrapper=Wrapper, experiment_dir=tmp_path)
+def test_can_create_info_only_metrics(generic_config, tmp_path):
+    controller = Controller(config=generic_config, wrapper=Wrapper, experiment_dir=tmp_path)
     controller.initialize_scheduler()
     assert isinstance(controller.scheduler.experiment.optimization_config, OptimizationConfig)
@@ -177,3 +187,17 @@ def test_pass_through_metric_passes_through_value(pass_through_config, tmp_path):
     f_ret = metric.f(wrapper.fetch_trial_data(trial, {}, name))
     assert f_ret == data.df["mean"].iloc[0]
     assert f_ret == trial.index
+
+
+def test_can_override_metric_func_kwargs():
+    x = [1, 2, 3, 4, 5, 6]
+    y = [0.1 * i for i in reversed(x)]
+    returns = []
+    normalizers = ["iqr", "std", "mean", "range"]
+    for normalizer in normalizers:
+        config = BOAMetric(**dict(metric="NRMSE", metric_func_kwargs=dict(normalizer=normalizer)))
+        metric = get_metric_from_config(config)
+        assert metric.metric_to_eval.__name__ == "normalized_root_mean_squared_error"
+        returns.append(metric.f(x, y))
+    # All the normalized values should be different, ensuring that the kwargs are passed through
+    assert len(set(returns)) == len(normalizers)
diff --git a/tests/1unit_tests/test_wrapper.py b/tests/1unit_tests/test_wrapper.py
index 0fef01e..6f0b554 100644
--- a/tests/1unit_tests/test_wrapper.py
+++ b/tests/1unit_tests/test_wrapper.py
@@ -1,5 +1,5 @@
 from boa import BaseWrapper, BOAConfig


-def test_wrapper_instantiation(generic_config):
-    BaseWrapper(config=generic_config)
+def test_wrapper_instantiation(generic_config, tmp_path):
+    BaseWrapper(config=generic_config, experiment_dir=tmp_path)
diff --git a/tests/conftest.py b/tests/conftest.py
index 6858658..a5336ab 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -7,13 +7,14 @@
 import boa.__main__ as dunder_main
 import boa.scripts.moo as run_moo
 import boa.scripts.run_branin as run_branin
-from boa import BOAConfig, cd_and_cd_back, load_yaml, split_shell_command
+from boa import BOAConfig, cd_and_cd_back, split_shell_command
 from boa.definitions import ROOT, TEST_SCRIPTS_DIR

 logger = logging.getLogger(__file__)

 TEST_DIR = ROOT / "tests"
 TEST_CONFIG_DIR = TEST_DIR / "test_configs"
+TEST_DEPRECATED_CONFIG_DIR = TEST_DIR / "test_configs/deprecated_configs"


 @pytest.fixture
@@ -23,26 +24,43 @@ def generic_config():


 @pytest.fixture
-def synth_config():
-    config_path = TEST_CONFIG_DIR / "test_config_synth.yaml"
+def saasbo_config():
+    config_path = TEST_CONFIG_DIR / "test_config_saasbo.yaml"
     return BOAConfig.from_jsonlike(file=config_path)


 @pytest.fixture
-def metric_config():
-    config_path = TEST_CONFIG_DIR / "test_config_metric.yaml"
+def moo_config():
+    """MultiObjective Optimization config"""
+    config_path = TEST_CONFIG_DIR / "test_config_moo.yaml"
     return BOAConfig.from_jsonlike(file=config_path)


 @pytest.fixture
-def gen_strat1_config():
-    config_path = TEST_CONFIG_DIR / "test_config_gen_strat1.yaml"
+def scripts_moo():
+    """MultiObjective Optimization config (scripts version)"""
+    config_path = TEST_SCRIPTS_DIR / "moo.yaml"
     return BOAConfig.from_jsonlike(file=config_path)


 @pytest.fixture
-def saasbo_config():
-    config_path = TEST_CONFIG_DIR / "test_config_saasbo.yaml"
+def scripts_synth_func():
+    """Synthetic function optimization config (scripts version)"""
+    config_path = TEST_SCRIPTS_DIR / "synth_func_config.yaml"
+    return BOAConfig.from_jsonlike(file=config_path)
+
+
+@pytest.fixture
+def denormed_param_parse_config():
+    """Denormalized parameter parsing config"""
+    config_path = TEST_CONFIG_DIR / "test_config_param_parse.yaml"
+    return BOAConfig.from_jsonlike(file=config_path)
+
+
+@pytest.fixture
+def pass_through_config():
+    """PassThrough Optimization config"""
+    config_path = TEST_CONFIG_DIR / "test_config_pass_through_metric.yaml"
     return BOAConfig.from_jsonlike(file=config_path)
@@ -54,48 +72,62 @@ def soo_config():


 @pytest.fixture
-def moo_config():
-    """MultiObjective Optimization config"""
-    config_path = TEST_CONFIG_DIR / "test_config_moo.yaml"
+def gen_strat1_config():
+    config_path = TEST_CONFIG_DIR / "test_config_gen_strat1.yaml"
     return BOAConfig.from_jsonlike(file=config_path)


 @pytest.fixture
-def pass_through_config():
-    """PassThrough Optimization config"""
-    config_path = TEST_CONFIG_DIR / "test_config_pass_through_metric.yaml"
+def synth_config():
+    config_path = TEST_CONFIG_DIR / "test_config_synth.yaml"
     return BOAConfig.from_jsonlike(file=config_path)


+######################
+# Deprecated Configs #
+######################
+
+# Only tested in test_config_deprecation_normalization.py, which tests
+# the deprecation normalization process
+
+
 @pytest.fixture
-def scripts_moo():
+def pass_through_config_deprecated():
     """PassThrough Optimization config"""
-    config_path = TEST_SCRIPTS_DIR / "moo.yaml"
+    config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_pass_through_metric_deprecated.yaml"
     return BOAConfig.from_jsonlike(file=config_path)


 @pytest.fixture
-def scripts_synth_func():
-    """PassThrough Optimization config"""
-    config_path = TEST_SCRIPTS_DIR / "synth_func_config.yaml"
+def soo_config_deprecated():
+    """ScalarizedObjective Optimization config"""
+    config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_soo_deprecated.yaml"
     return BOAConfig.from_jsonlike(file=config_path)


 @pytest.fixture
-def denormed_param_parse_config():
-    """MultiObjective Optimization config"""
-    config_path = TEST_CONFIG_DIR / "test_config_param_parse.yaml"
+def metric_config_deprecated():
+    config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_metric_deprecated.yaml"
+    return BOAConfig.from_jsonlike(file=config_path)
+
+
+@pytest.fixture
+def gen_strat1_config_deprecated():
+    config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_gen_strat1_deprecated.yaml"
     return BOAConfig.from_jsonlike(file=config_path)


 @pytest.fixture
-def synth_optimization_options(synth_config):
-    return synth_config["optimization_options"]
+def synth_config_deprecated():
+    config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_synth_deprecated.yaml"
+    return BOAConfig.from_jsonlike(file=config_path)


 @pytest.fixture
-def metric_optimization_options(metric_config):
-    return metric_config["optimization_options"]
+def moo_config_deprecated():
+    """MultiObjective Optimization config"""
+    config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_moo_deprecated.yaml"
+    return BOAConfig.from_jsonlike(file=config_path)


 @pytest.fixture
@@ -124,6 +156,7 @@ def moo_main_run(tmp_path_factory, cd_to_root_and_back_session):
 def stand_alone_opt_package_run(request, tmp_path_factory, cd_to_root_and_back_session):
     # parametrize the test to pass in script options in config as relative and absolute paths
     if getattr(request, "param", None) == "absolute":
+        temp_dir = tmp_path_factory.mktemp("temp_dir")
         wrapper_path = (TEST_DIR / "scripts/stand_alone_opt_package/wrapper.py").resolve()
         config = {
             "objective": {"metrics": [{"metric": "mean", "name": "Mean"}, {"metric": "RMSE", "info_only": True}]},
@@ -135,16 +168,21 @@
             {"bounds": [-5.0, 10.0], "name": "x0", "type": "range"},
             {"bounds": [0.0, 15.0], "name": "x1", "type": "range"},
         ],
-        "script_options": {"wrapper_path": str(wrapper_path)},
+        "script_options": {
+            "wrapper_path": str(wrapper_path),
+            "output_dir": str(temp_dir),
+            "exp_name": "test_experiment",
+        },
     }
-    temp_dir = tmp_path_factory.mktemp("temp_dir")
         config_path = temp_dir / "config.yaml"
         with open(Path(config_path), "w") as file:
             json.dump(config, file)
+        args = f"--config-path {config_path}"
     else:
         config_path = TEST_DIR / "scripts/stand_alone_opt_package/stand_alone_pkg_config.yaml"
+        args = f"--config-path {config_path} -td"

-    yield dunder_main.main(split_shell_command(f"--config-path {config_path} -td"), standalone_mode=False)
+    yield dunder_main.main(split_shell_command(args), standalone_mode=False)


 @pytest.fixture(scope="session")
config""" - config_path = TEST_CONFIG_DIR / "test_config_moo.yaml" +def gen_strat1_config(): + config_path = TEST_CONFIG_DIR / "test_config_gen_strat1.yaml" return BOAConfig.from_jsonlike(file=config_path) @pytest.fixture -def pass_through_config(): - """PassThrough Optimization config""" - config_path = TEST_CONFIG_DIR / "test_config_pass_through_metric.yaml" +def synth_config(): + config_path = TEST_CONFIG_DIR / "test_config_synth.yaml" return BOAConfig.from_jsonlike(file=config_path) +###################### +# Deprecated Configs # +###################### + +# Only tested in test_config_deprecation_normalization.py, which tests +# the deprecation normalization process + + @pytest.fixture -def scripts_moo(): +def pass_through_config_deprecated(): """PassThrough Optimization config""" - config_path = TEST_SCRIPTS_DIR / "moo.yaml" + config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_pass_through_metric_deprecated.yaml" return BOAConfig.from_jsonlike(file=config_path) @pytest.fixture -def scripts_synth_func(): - """PassThrough Optimization config""" - config_path = TEST_SCRIPTS_DIR / "synth_func_config.yaml" +def soo_config_deprecated(): + """ScalarizedObjective Optimization config""" + config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_soo_deprecated.yaml" return BOAConfig.from_jsonlike(file=config_path) @pytest.fixture -def denormed_param_parse_config(): - """MultiObjective Optimization config""" - config_path = TEST_CONFIG_DIR / "test_config_param_parse.yaml" +def metric_config_deprecated(): + config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_metric_deprecated.yaml" + return BOAConfig.from_jsonlike(file=config_path) + + +@pytest.fixture +def gen_strat1_config_deprecated(): + config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_gen_strat1_deprecated.yaml" return BOAConfig.from_jsonlike(file=config_path) @pytest.fixture -def synth_optimization_options(synth_config): - return synth_config["optimization_options"] +def synth_config_deprecated(): + config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_synth_deprecated.yaml" + return BOAConfig.from_jsonlike(file=config_path) @pytest.fixture -def metric_optimization_options(metric_config): - return metric_config["optimization_options"] +def moo_config_deprecated(): + """MultiObjective Optimization config""" + config_path = TEST_DEPRECATED_CONFIG_DIR / "test_config_moo_deprecated.yaml" + return BOAConfig.from_jsonlike(file=config_path) @pytest.fixture @@ -124,6 +156,7 @@ def moo_main_run(tmp_path_factory, cd_to_root_and_back_session): def stand_alone_opt_package_run(request, tmp_path_factory, cd_to_root_and_back_session): # parametrize the test to pass in script options in config as relative and absolute paths if getattr(request, "param", None) == "absolute": + temp_dir = tmp_path_factory.mktemp("temp_dir") wrapper_path = (TEST_DIR / "scripts/stand_alone_opt_package/wrapper.py").resolve() config = { "objective": {"metrics": [{"metric": "mean", "name": "Mean"}, {"metric": "RMSE", "info_only": True}]}, @@ -135,16 +168,21 @@ def stand_alone_opt_package_run(request, tmp_path_factory, cd_to_root_and_back_s {"bounds": [-5.0, 10.0], "name": "x0", "type": "range"}, {"bounds": [0.0, 15.0], "name": "x1", "type": "range"}, ], - "script_options": {"wrapper_path": str(wrapper_path)}, + "script_options": { + "wrapper_path": str(wrapper_path), + "output_dir": str(temp_dir), + "exp_name": "test_experiment", + }, } - temp_dir = tmp_path_factory.mktemp("temp_dir") config_path = temp_dir / "config.yaml" with open(Path(config_path), "w") as file: 
diff --git a/tests/scripts/other_langs/r_package_full/config.yaml b/tests/scripts/other_langs/r_package_full/config.yaml
index ef7e5f3..bced8ce 100644
--- a/tests/scripts/other_langs/r_package_full/config.yaml
+++ b/tests/scripts/other_langs/r_package_full/config.yaml
@@ -1,17 +1,16 @@
-optimization_options:
-  objective_options: # can also use the key moo
-    objectives:
-      - name: metric
-        metric: mean
-  generation_strategy:
-    steps:
-      - model: SOBOL
-        num_trials: 5
-      - model: GPEI
-        num_trials: -1
-  scheduler:
-    total_trials: 15
-    tolerated_trial_failure_rate: 0.8
+objective: # can also use the key moo
+  metrics:
+    - name: metric
+      metric: mean
+generation_strategy:
+  steps:
+    - model: SOBOL
+      num_trials: 5
+    - model: GPEI
+      num_trials: -1
+scheduler:
+  total_trials: 15
+  tolerated_trial_failure_rate: 0.8

 parameters:
   x0:
diff --git a/tests/scripts/other_langs/r_package_light/config.yaml b/tests/scripts/other_langs/r_package_light/config.yaml
index fa8cd7d..34b911e 100644
--- a/tests/scripts/other_langs/r_package_light/config.yaml
+++ b/tests/scripts/other_langs/r_package_light/config.yaml
@@ -1,16 +1,15 @@
-optimization_options:
-  objective_options: # can also use the key moo
-    objectives:
-      - name: metric
-        metric: mean
-  generation_strategy:
-    steps:
-      - model: SOBOL
-        num_trials: 5
-      - model: GPEI
-        num_trials: -1
-  scheduler:
-    total_trials: 15
+objective: # can also use the key moo
+  metrics:
+    - name: metric
+      metric: mean
+generation_strategy:
+  steps:
+    - model: SOBOL
+      num_trials: 5
+    - model: GPEI
+      num_trials: -1
+scheduler:
+  total_trials: 15

 parameters:
   x0:
diff --git a/tests/scripts/other_langs/r_package_streamlined/config.yaml b/tests/scripts/other_langs/r_package_streamlined/config.yaml
index 751acf8..a40631d 100644
--- a/tests/scripts/other_langs/r_package_streamlined/config.yaml
+++ b/tests/scripts/other_langs/r_package_streamlined/config.yaml
@@ -1,10 +1,8 @@
-optimization_options:
-  objective_options:
-    objectives:
-      - name: metric
-  experiment:
-    name: "r_streamlined_run"
-    trials: 15
+objective:
+  metrics:
+    - name: metric
+scheduler:
+  n_trials: 15

 parameters:
   x0:
@@ -43,6 +41,7 @@ script_options:
   # it uses the config file directory as your working directory)
   # here config.yaml and run_model.R are in the same directory
   run_model: Rscript run_model.R
+  exp_name: "r_streamlined_run"


 # options only needed by the model and not BOA
diff --git a/tests/scripts/other_langs/r_package_streamlined/config_fail.yaml b/tests/scripts/other_langs/r_package_streamlined/config_fail.yaml
index a6fdde9..7be126b 100644
--- a/tests/scripts/other_langs/r_package_streamlined/config_fail.yaml
+++ b/tests/scripts/other_langs/r_package_streamlined/config_fail.yaml
@@ -1,16 +1,14 @@
-optimization_options:
-  objective_options: # can also use the key moo
-    objectives:
-      - name: mean
-        boa_metric: mean
-  generation_strategy:
-    steps:
-      - model: SOBOL
-        num_trials: 5
-      - model: GPEI
-        num_trials: -1
-  scheduler:
-    total_trials: 15
+objective: # can also use the key moo
+  metrics:
+    - metric: mean
+generation_strategy:
+  steps:
+    - model: SOBOL
+      num_trials: 5
+    - model: GPEI
+      num_trials: -1
+scheduler:
+  total_trials: 15

 # This fails because the parameters should be floats from 0 to 1, instead of choice parameters
 parameters:
@@ -42,6 +40,9 @@
 script_options:
   run_model: Rscript run_model.R

+# options only needed by the model and not BOA
+# You can put anything here that your model might need
+# We don't need anything extra for this example
 model_options:
   input_size: 15
   model_dir: .
diff --git a/tests/scripts/stand_alone_opt_package/stand_alone_pkg_config.yaml b/tests/scripts/stand_alone_opt_package/stand_alone_pkg_config.yaml
index 95a0f6d..dd894fb 100644
--- a/tests/scripts/stand_alone_opt_package/stand_alone_pkg_config.yaml
+++ b/tests/scripts/stand_alone_opt_package/stand_alone_pkg_config.yaml
@@ -1,23 +1,18 @@
 # Single objective optimization config
-optimization_options:
-  objective_options:
-    objectives:
-      - metric: mean
-        name: Mean
-      - metric: RMSE
-        info_only: True
-  generation_strategy:
-    steps:
-      - model: SOBOL
-        num_trials: 5
-      - model: GPEI
-        num_trials: -1
+objective:
+  metrics:
+    - metric: mean
+      name: Mean
+    - metric: RMSE
+      info_only: True
+generation_strategy:
+  steps:
+    - model: SOBOL
+      num_trials: 5
+    - model: GPEI
+      num_trials: -1
+scheduler:
   n_trials: 10
-# working_dir: .
-# experiment_dir: ...
-# append_timestamp: True
-# This last option appends a timestamp to our output experiment directory.
-# This is also the default (True)

 parameters:
   x0:
diff --git a/tests/test_configs/deprecated_configs/test_config_gen_strat1_deprecated.yaml b/tests/test_configs/deprecated_configs/test_config_gen_strat1_deprecated.yaml
new file mode 100644
index 0000000..4e67805
--- /dev/null
+++ b/tests/test_configs/deprecated_configs/test_config_gen_strat1_deprecated.yaml
@@ -0,0 +1,52 @@
+optimization_options:
+  objective_options:
+    objectives:
+      - name: rmse
+        metric: RootMeanSquaredError
+  generation_strategy:
+    steps:
+      - model: SOBOL
+        num_trials: 5
+      - model: GPEI
+        num_trials: -1
+  experiment:
+    name: "test_experiment"
+    trials: 10
+
+model_options:
+  model_specific_options:
+    - 1
+    - 2
+    - 3
+
+parameters:
+  x1:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x2:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x3:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x4:
+    type: range
+    bounds: [0, 1]
+    value: .5 # dummy value for testing
+    value_type: float
+
+  x5: # dummy value for testing
+    type: fixed
+    bounds: [ 0, 1 ]
+    value: .5 # dummy value for testing
+    value_type: float
+
+parameter_constraints:
+  - x2 + x1 >= .1
+  - x2 + x1 + .6*x1 <= .6
diff --git a/tests/test_configs/test_config_metric.yaml b/tests/test_configs/deprecated_configs/test_config_metric_deprecated.yaml
similarity index 100%
rename from tests/test_configs/test_config_metric.yaml
rename to tests/test_configs/deprecated_configs/test_config_metric_deprecated.yaml
diff --git a/tests/test_configs/deprecated_configs/test_config_moo_deprecated.yaml b/tests/test_configs/deprecated_configs/test_config_moo_deprecated.yaml
new file mode 100644
index 0000000..96bcc55
--- /dev/null
+++ b/tests/test_configs/deprecated_configs/test_config_moo_deprecated.yaml
@@ -0,0 +1,50 @@
+# MultiObjective Optimization config
+optimization_options:
+  objective_options:
+    objectives:
+      # List all of your metrics here,
+      # only list multiple objectives for a multi objective optimization
+      - metric: RMSE # names default to the metric itself if not specified
+      - name: Meanyyy
+        metric: Mean
+    outcome_constraints: []
+    objective_thresholds: []
+  experiment:
+    name: "test_experiment"
+    n_trials: 10
+
+parameters:
+  x1:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x2:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x3:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x4:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x5:
+    type: fixed
+    value: .5
+    value_type: float
+
+parameter_constraints:
+  - x2 + x1 >= .1
+  - x2 + x1 + .6*x1 <= .6
+
+model_options:
+  model_specific_options:
+    - 1
+    - 2
+    - 3
diff --git a/tests/test_configs/deprecated_configs/test_config_pass_through_metric_deprecated.yaml b/tests/test_configs/deprecated_configs/test_config_pass_through_metric_deprecated.yaml
new file mode 100644
index 0000000..44d014c
--- /dev/null
+++ b/tests/test_configs/deprecated_configs/test_config_pass_through_metric_deprecated.yaml
@@ -0,0 +1,21 @@
+# Single objective optimization config
+optimization_options:
+  objective_options:
+    objectives:
+      # We can specify a metric as passthrough (it doesn't require a dictionary argument and just passes the value
+      # through this way
+      - name: metric1
+        metric: passthrough
+      # Also a metric that doesn't have the metric type specified and only the name will
+      # default to a passthrough metric
+      - name: metric2
+  scheduler:
+    total_trials: 10
+
+# optimization parameters
+parameters:
+  x1:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
diff --git a/tests/test_configs/deprecated_configs/test_config_soo_deprecated.yaml b/tests/test_configs/deprecated_configs/test_config_soo_deprecated.yaml
new file mode 100644
index 0000000..2b62426
--- /dev/null
+++ b/tests/test_configs/deprecated_configs/test_config_soo_deprecated.yaml
@@ -0,0 +1,69 @@
+# ScalarizedObjective Optimization config
+optimization_options:
+  objective_options: # can also use the key soo
+    objectives:
+      - name: rmse
+        metric: RootMeanSquaredError
+        properties:
+          obs_file: path
+      - metric: R2
+    weights:
+      - 1
+      - 2
+    minimize: true
+#    outcome_constraints:
+#      - metric:
+#          boa_metric: mean
+#          name: something
+#        op: LEQ
+#        bound: 10
+#        relative: true
+
+  generation_strategy:
+    steps:
+      - model: SOBOL
+        num_trials: 5
+      - model: GPEI
+        num_trials: -1
+  scheduler:
+    total_trials: 10
+  experiment:
+    name: "test_experiment"
+
+model_options:
+  model_specific_options:
+    - 1
+    - 2
+    - 3
+
+parameters:
+  x1:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x2:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x3:
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x4:
+    type: range
+    bounds: [0, 1]
+    value: .5 # dummy value for testing
+    value_type: float
+
+  x5: # dummy value for testing
+    type: fixed
+    bounds: [ 0, 1 ]
+    value: .5 # dummy value for testing
+    value_type: float
+
+parameter_constraints:
+  - x2 + x1 >= .1
+  - x2 + x1 + .6*x1 <= .6
diff --git a/tests/test_configs/deprecated_configs/test_config_synth_deprecated.yaml b/tests/test_configs/deprecated_configs/test_config_synth_deprecated.yaml
new file mode 100644
index 0000000..bd45c0a
--- /dev/null
+++ b/tests/test_configs/deprecated_configs/test_config_synth_deprecated.yaml
@@ -0,0 +1,39 @@
+optimization_options:
+  objective_options:
+    objectives:
+      - synthetic_metric: Hartmann4
+  scheduler:
+    total_trials: 10
+  experiment:
+    name: "test_experiment"
+
+model_options:
+  model_specific_options:
+    - 1
+    - 2
+    - 3
+
+parameters:
+  x1: # Ratio of urban area that is house (not ground, not trees)
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x2: # Ratio of urban area that is ground (not house, not trees)
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x3: # Size of a plot of land (area of a house and its ground it sits on)
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+  x4: # The mean LAI of the canopy. There will be random perturbations to this
+    type: range
+    bounds: [0, 1]
+    value_type: float
+
+parameter_constraints:
+  - x2 + x1 >= .1
+  - x2 + x1 + .6*x1 <= .6
diff --git a/tests/test_configs/test_config_gen_strat1.yaml b/tests/test_configs/test_config_gen_strat1.yaml
index 5400ace..5714341 100644
--- a/tests/test_configs/test_config_gen_strat1.yaml
+++ b/tests/test_configs/test_config_gen_strat1.yaml
@@ -1,18 +1,23 @@
-optimization_options:
-  objective_options:
-    objectives:
-      - name: rmse
-        metric: RootMeanSquaredError
-  generation_strategy:
-    steps:
-      - model: SOBOL
-        num_trials: 5
-      - model: GPEI
-        num_trials: -1
-  scheduler:
-    total_trials: 10
-  experiment:
-    name: "test_experiment"
+objective:
+  metrics:
+    - name: rmse
+      metric: RootMeanSquaredError
+      properties:
+        # You can attach any property you want to the metric.
+        # Use this to pass along any information the metric needs
+        # through your wrapper.
+        any_property: any_value
+
+generation_strategy:
+  steps:
+    - model: SOBOL
+      num_trials: 5
+    - model: GPEI
+      num_trials: -1
+scheduler:
+  total_trials: 10
+script_options:
+  exp_name: "test_experiment"

 model_options:
   model_specific_options:
diff --git a/tests/test_configs/test_config_generic.yaml b/tests/test_configs/test_config_generic.yaml
index 8ce9905..b901f9e 100644
--- a/tests/test_configs/test_config_generic.yaml
+++ b/tests/test_configs/test_config_generic.yaml
@@ -64,13 +64,3 @@ scheduler:
   global_stopping_strategy:
     type: ImprovementGlobalStoppingStrategy
     min_trials: 7
-  early_stopping_strategy:
-    type: AndEarlyStoppingStrategy
-    left:
-      type: PercentileEarlyStoppingStrategy
-      metric_names:
-        - rmse
-    right:
-      type: PercentileEarlyStoppingStrategy
-      metric_names:
-        - rmse
diff --git a/tests/test_configs/test_config_moo.yaml b/tests/test_configs/test_config_moo.yaml
index 3f8514a..5e8ed4c 100644
--- a/tests/test_configs/test_config_moo.yaml
+++ b/tests/test_configs/test_config_moo.yaml
@@ -1,18 +1,17 @@
 # MultiObjective Optimization config
-optimization_options:
-  objective_options:
-    objectives:
-      # List all of your metrics here,
-      # only list multiple objectives for a multi objective optimization
-      - metric: RMSE # names default to the metric itself if not specified
-      - name: Meanyyy
-        metric: Mean
-    outcome_constraints: []
-    objective_thresholds: []
-  experiment:
-    name: "test_experiment"
-  scheduler:
-    total_trials: 10
+objective:
+  metrics:
+    # List all of your metrics here,
+    # only list multiple objectives for a multi objective optimization
+    - metric: RMSE # names default to the metric itself if not specified
+    - name: Meanyyy
+      metric: Mean
+  outcome_constraints: []
+  objective_thresholds: []
+scheduler:
+  total_trials: 10
+script_options:
+  exp_name: "test_experiment"

 parameters:
   x1:
diff --git a/tests/test_configs/test_config_pass_through_metric.yaml b/tests/test_configs/test_config_pass_through_metric.yaml
index 44d014c..c761adb 100644
--- a/tests/test_configs/test_config_pass_through_metric.yaml
+++ b/tests/test_configs/test_config_pass_through_metric.yaml
@@ -1,16 +1,15 @@
 # Single objective optimization config
-optimization_options:
-  objective_options:
-    objectives:
-      # We can specify a metric as passthrough (it doesn't require a dictionary argument and just passes the value
-      # through this way
-      - name: metric1
-        metric: passthrough
-      # Also a metric that doesn't have the metric type specified and only the name will
-      # default to a passthrough metric
-      - name: metric2
-  scheduler:
-    total_trials: 10
+objective:
+  metrics:
+    # We can specify a metric as passthrough (it doesn't require a dictionary argument and just passes the value
+    # through this way)
+    - name: metric1
+      metric: passthrough
+    # Also a metric that doesn't have the metric type specified and only the name will
+    # default to a passthrough metric
+    - name: metric2
+scheduler:
+  total_trials: 10

 # optimization parameters
 parameters:
@@ -18,4 +17,3 @@
     type: range
     bounds: [0, 1]
     value_type: float
-
diff --git a/tests/test_configs/test_config_soo.yaml b/tests/test_configs/test_config_soo.yaml
index 2b62426..34c956a 100644
--- a/tests/test_configs/test_config_soo.yaml
+++ b/tests/test_configs/test_config_soo.yaml
@@ -1,34 +1,32 @@
 # ScalarizedObjective Optimization config
-optimization_options:
-  objective_options: # can also use the key soo
-    objectives:
-      - name: rmse
-        metric: RootMeanSquaredError
-        properties:
-          obs_file: path
-      - metric: R2
-    weights:
-      - 1
-      - 2
-    minimize: true
-#    outcome_constraints:
-#      - metric:
-#          boa_metric: mean
-#          name: something
-#        op: LEQ
-#        bound: 10
-#        relative: true
+objective: # can also use the key soo
+  metrics:
+    - name: rmse
+      metric: RootMeanSquaredError
+      weight: 1
+      properties:
+        obs_file: path
+    - metric: R2
+      weight: 2
+  minimize: true
+#  outcome_constraints:
+#    - metric:
+#        metric: rmse
+#        name: something
+#      op: LEQ
+#      bound: 10
+#      relative: true

-  generation_strategy:
-    steps:
-      - model: SOBOL
-        num_trials: 5
-      - model: GPEI
-        num_trials: -1
-  scheduler:
-    total_trials: 10
-  experiment:
-    name: "test_experiment"
+generation_strategy:
+  steps:
+    - model: SOBOL
+      num_trials: 5
+    - model: GPEI
+      num_trials: -1
+scheduler:
+  total_trials: 10
+script_options:
+  exp_name: "test_experiment"

 model_options:
   model_specific_options:
diff --git a/tests/test_configs/test_config_synth.yaml b/tests/test_configs/test_config_synth.yaml
index bd45c0a..932dd51 100644
--- a/tests/test_configs/test_config_synth.yaml
+++ b/tests/test_configs/test_config_synth.yaml
@@ -1,11 +1,9 @@
-optimization_options:
-  objective_options:
-    objectives:
-      - synthetic_metric: Hartmann4
-  scheduler:
-    total_trials: 10
-  experiment:
-    name: "test_experiment"
+objective:
+  metrics:
+    - metric: Hartmann4
+      metric_type: synthetic_metric
+scheduler:
+  total_trials: 10

 model_options:
   model_specific_options:
@@ -37,3 +35,6 @@
 parameter_constraints:
   - x2 + x1 >= .1
   - x2 + x1 + .6*x1 <= .6
+
+script_options:
+  exp_name: "test_experiment"
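Taken together, the config changes in this diff all follow one migration: the old `optimization_options` umbrella is flattened into top-level keys, with the experiment name moving under `script_options`. A before/after sketch distilled from the test configs above (values illustrative):

```yaml
# Deprecated layout (still accepted; normalized by BOAConfig):
# optimization_options:
#   objective_options:
#     objectives:
#       - metric: RMSE
#   scheduler:
#     total_trials: 10
#   experiment:
#     name: "test_experiment"

# Current layout:
objective:
  metrics:
    - metric: RMSE
scheduler:
  total_trials: 10
script_options:
  exp_name: "test_experiment"
```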