beamds.beam.hpo package#
Submodules#
beamds.beam.hpo.core module#
beamds.beam.hpo.lifelong_hpo module#
beamds.beam.hpo.optuna module#
beamds.beam.hpo.params module#
- class beamds.beam.hpo.params.HPOConfig(*args, _store_init_path=None, _save_init_args=True, **kwargs)[source]#
Bases: RayConfig, ExperimentConfig
- parameters = [BeamParam(name='gpus-per-trial', type=<class 'int'>, default=1, help='number of gpus per trial', tags=None), BeamParam(name='cpus-per-trial', type=<class 'int'>, default=4, help='number of cpus per trial', tags=None), BeamParam(name=['n-trials', 'num-trials'], type=<class 'int'>, default=1000, help='number of HPO trials', tags=None), BeamParam(name='n-jobs', type=<class 'int'>, default=1, help='number of parallel HPO jobs', tags=None), BeamParam(name='time-budget-s', type=<class 'int'>, default=None, help='time budget in seconds', tags=None), BeamParam(name='print-results', type=<class 'bool'>, default=False, help='print the intermediate results during training', tags=None), BeamParam(name='enable-tqdm', type=<class 'bool'>, default=False, help='enable tqdm progress bar', tags=None), BeamParam(name='print-hyperparameters', type=<class 'bool'>, default=True, help='print the hyperparameters before training', tags=None), BeamParam(name='verbose', type=<class 'bool'>, default=True, help='verbose mode in hyperparameter optimization', tags=None), BeamParam(name='track-results', type=<class 'bool'>, default=False, help='track the results of each trial', tags=None), BeamParam(name='track-algorithms', type=<class 'bool'>, default=False, help='track the algorithms of each trial', tags=None), BeamParam(name='track-hparams', type=<class 'bool'>, default=True, help='track the hyperparameters of each trial', tags=None), BeamParam(name='track-suggestion', type=<class 'bool'>, default=True, help='track the suggestions of each trial', tags=None), BeamParam(name='hpo-path', type=<class 'str'>, default='/home/runner/beam_data/projects/hpo', help='Root directory for Logs and results of Hyperparameter optimizations and the associated experiments', tags=None), BeamParam(name='stop', type=<class 'str'>, default=None, help='stop criteria for the HPO', tags=None), BeamParam(name='get-port-from-beam-port-range', type=<class 'bool'>, default=True, help='get port from beam port range', tags=None), BeamParam(name='replay-buffer-size', type=<class 'int'>, default=None, help='Maximal size of finite-memory hpo', tags=None), BeamParam(name='time-window', type=<class 'int'>, default=None, help='Maximal time window of finite-memory hpo', tags=None), BeamParam(name='max-iterations', type=<class 'int'>, default=None, help='Maximal number of iterations for ASHAScheduler', tags=None), BeamParam(name='reduction-factor', type=<class 'int'>, default=2, help='Reduction factor for ASHAScheduler', tags=None), BeamParam(name='grace-period', type=<class 'int'>, default=20, help='Grace period for ASHAScheduler', tags=None), BeamParam(name='report-best-objective', type=<class 'bool'>, default=False, help='Report the best objective at each iteration', tags=None)]#
- class beamds.beam.hpo.params.RayConfig(*args, _store_init_path=None, _save_init_args=True, **kwargs)[source]#
Bases:
BeamConfig
- parameters = [BeamParam(name='include-dashboard', type=<class 'bool'>, default=True, help='include ray-dashboard', tags=None), BeamParam(name='runtime-env', type=<class 'str'>, default=None, help='runtime environment for ray', tags=None), BeamParam(name='dashboard-port', type=<class 'int'>, default=None, help='dashboard port for ray', tags=None), BeamParam(name='ray-address', type=<class 'str'>, default='auto', help='whether to link to existing ray cluster (auto/ip) or to set up a local ray instance (local/ip)', tags=None)]#