
Commit

Minor fixes for reloading hyperlogger without stats logging
RobertTLange committed May 4, 2021
1 parent 9df6305 commit 176fb97
Showing 5 changed files with 20 additions and 9 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -81,7 +81,7 @@ To cite this repository:
}
```

-Much of the `mle-toolbox` design has been inspired by discussions with Jonathan Frankle and Nandan Rao about the quest for empirically sound and supported claims.
+Much of the `mle-toolbox` design has been inspired by discussions with [Jonathan Frankle](http://www.jfrankle.com/) and [Nandan Rao](https://twitter.com/nandanrao) about the quest for empirically sound and supported claims in Machine Learning.

## Notes, Development & Questions :question:

7 changes: 4 additions & 3 deletions mle_toolbox/experiment/experiment.py
@@ -103,7 +103,7 @@ def run(self):
job_id = self.schedule_remote()
if self.job_status == 1:
self.logger.info(f"Job ID: {job_id} - Remote job scheduled" \
" - {self.config_filename}")
f" - {self.config_filename}")
else:
self.logger.info(f"Job ID: {job_id} - Error when scheduling " \
f"remote job - {self.config_filename}")
@@ -125,7 +125,7 @@ def run(self):
status_out = self.monitor_local(proc)
if status_out == 0:
self.logger.info("PID: {proc.pid} - Local job successfully " \
"completed - { self.config_filename}")
f"completed - { self.config_filename}")
else:
self.logger.info(f"PID: {proc.pid} - Error when running local " \
f"job - {self.config_filename}")
@@ -217,7 +217,8 @@ def cluster_available(self):
else:
self.run_on_sge_cluster = 0
self.run_on_slurm_cluster = 0
-return on_sge_cluster or on_sge_head or on_slurm_head or on_slurm_cluster
+return (on_sge_cluster or on_sge_head or
+        on_slurm_head or on_slurm_cluster)

def generate_cmd_line_args(self,
cmd_line_input: Union[None, dict]=None) -> str:
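The two `run()` hunks above fix a classic Python pitfall: when adjacent string literals are implicitly concatenated, each literal containing a placeholder needs its own `f` prefix, otherwise the braces are emitted verbatim. A minimal, self-contained sketch of the failure mode (the job ID and filename values are illustrative only):

```python
config_filename = "configs/base_config.json"

# Broken: the continuation literal lacks the f-prefix, so its
# placeholder is NOT interpolated and prints literally.
broken = f"Job ID: 42 - Remote job scheduled" \
         " - {config_filename}"
print(broken)  # Job ID: 42 - Remote job scheduled - {config_filename}

# Fixed (as in the commit): every literal with a placeholder
# carries its own f-prefix.
fixed = f"Job ID: 42 - Remote job scheduled" \
        f" - {config_filename}"
print(fixed)   # Job ID: 42 - Remote job scheduled - configs/base_config.json
```

Note, too, that the leading literal in the second hunk (`"PID: {proc.pid} - Local job successfully "`) still lacks an `f` prefix in this commit, so that placeholder would continue to print verbatim.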
7 changes: 4 additions & 3 deletions mle_toolbox/hyperopt/hyperlogger.py
@@ -17,7 +17,7 @@ def __init__(self, hyperlog_fname: str, max_target: bool=True,
self.max_target = max_target # Whether we want to max target (reward)
self.eval_metrics = eval_metrics # Var names to compare across runs
self.verbose = verbose
-self.no_results_logging = no_results_logging # Option to not log metrics
+self.no_results_logging = no_results_logging # Want to not log metrics?

# Instantiate the meta-logger
self.logger = logging.getLogger(__name__)
@@ -90,7 +90,7 @@ def update_log(self, params, meta_eval_log,

def print_log_state(self):
""" Log currently best param config for each metric. """
-if self.iter_id > 0:
+if self.iter_id > 0 and not self.no_results_logging:
for i, m in enumerate(self.best_per_metric.keys()):
print_framed(m, frame_str="-")
self.logger.info(
@@ -123,7 +123,8 @@ def reload_log(self):
self.all_run_ids = []

# Get best performing params for each eval metric
-self.best_per_metric = self.get_best_performances(self.eval_metrics)
+if not self.no_results_logging:
+    self.best_per_metric = self.get_best_performances(self.eval_metrics)

def get_best_performances(self, eval_metrics):
""" Get best performing hyperparam configuration up to current iter """
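Taken together, the `hyperlogger.py` hunks guard the best-metric bookkeeping behind the `no_results_logging` flag, so reloading a hyperlog without stats logging no longer queries metrics that were never written. A stripped-down sketch of the pattern (class name, stub method, and print format are illustrative, not the toolbox's actual API):

```python
class HyperLoggerSketch:
    """Illustrative stand-in for the toolbox's hyperopt logger."""
    def __init__(self, eval_metrics, no_results_logging=False):
        self.eval_metrics = eval_metrics
        self.no_results_logging = no_results_logging  # Skip metric bookkeeping?
        self.iter_id = 0
        self.best_per_metric = {}

    def reload_log(self):
        # Only recompute the per-metric best configs when metrics are
        # actually logged; otherwise there is nothing to aggregate.
        if not self.no_results_logging:
            self.best_per_metric = self.get_best_performances(self.eval_metrics)

    def print_log_state(self):
        # Mirror the same guard on the reporting side.
        if self.iter_id > 0 and not self.no_results_logging:
            for metric, best in self.best_per_metric.items():
                print(f"{metric}: current best -> {best}")

    def get_best_performances(self, eval_metrics):
        # Stub standing in for the real lookup over logged runs.
        return {m: None for m in eval_metrics}
```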
2 changes: 1 addition & 1 deletion mle_toolbox/launch/prepare_experiment.py
@@ -23,7 +23,7 @@ def welcome_to_mle_toolbox(verbose=False):
print(85*"=")
time_t = datetime.now().strftime("%m/%d/%Y %I:%M:%S %p")
print(time_t, f"Thx for using MLE-Toolbox {__version__}"
f" Locally, on SGE or Slurm or Clusters.")
f" Locally, on Clusters or the Cloud.")
if verbose:
print(time_t, "It implements the following experiment types:")
print(" - single-experiment: Run a single configuration experiment.")
11 changes: 10 additions & 1 deletion mle_toolbox/utils/mle_experiment.py
@@ -3,13 +3,15 @@
get_extra_cmd_line_input,
set_random_seeds)
from .mle_logger import MLE_Logger
+from .helpers import print_framed


class MLExperiment(object):
def __init__(self,
config_fname: str="configs/base_config.json",
auto_setup: bool=True,
-create_jax_prng: bool=False):
+create_jax_prng: bool=False,
+default_seed: int=0):
''' Load the job configs for the MLE experiment. '''
# Load the different configurations for the experiment.
train_config, net_config, log_config, extra_args = get_configs_ready(
@@ -23,13 +25,20 @@ def __init__(self,
self.log_config = log_config
self.extra_config = extra_config
self.create_jax_prng = create_jax_prng
+self.default_seed = default_seed

# Make initial setup optional so that configs can be modified ad-hoc
if auto_setup:
self.setup()

def setup(self):
''' Set the random seed & initialize the logger. '''
+# If no seed is provided in train_config - set it to default
+if "seed_id" not in self.train_config.keys():
+    self.train_config.seed_id = self.default_seed
+    print_framed(f"!!!WARNING!!!: No seed provided - set to default "
+                 f"{self.default_seed}.")

# Set the random seeds for all random number generation
if self.create_jax_prng:
# Return JAX random number generating key
Expand Down
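The `mle_experiment.py` change gives `MLExperiment` a `default_seed` that is substituted (with a framed warning) whenever the loaded `train_config` carries no `seed_id`. A self-contained sketch of that fallback, using a plain `dict` in place of the toolbox's attribute-style config object:

```python
def apply_default_seed(train_config: dict, default_seed: int = 0) -> dict:
    """Fill in a default seed when the config does not provide one."""
    if "seed_id" not in train_config:
        train_config["seed_id"] = default_seed
        # The toolbox prints this via its print_framed helper.
        print(f"!!!WARNING!!!: No seed provided - set to default {default_seed}.")
    return train_config

# A config without a seed gets the default plus a warning;
# an explicit seed is left untouched.
print(apply_default_seed({"lr": 3e-4}))     # {'lr': 0.0003, 'seed_id': 0}
print(apply_default_seed({"seed_id": 42}))  # {'seed_id': 42}
```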

