Supports new optuna v3.0 series (#19)
* Update requirements.txt

* Update tuner.py

* Replace IntUniformDistribution with IntDistribution
* Replace DiscreteUniformDistribution with FloatDistribution

* Remove botorch sampler; it is still experimental

* Update requirements.txt

* Update README.md
fsmosca authored Dec 15, 2022
1 parent 6057093 commit 1970475
Showing 2 changed files with 12 additions and 20 deletions.
14 changes: 6 additions & 8 deletions requirements.txt
@@ -1,8 +1,6 @@
-optuna==2.10.0
-plotly==4.10.0
-scikit-learn==0.24.2
-kaleido==0.0.3.post1
-scikit-learn==0.24.2
-scikit-optimize==0.8.1
-pandas==1.1.2
-botorch==0.5.1
+optuna==3.0.4
+plotly==5.11.0
+scikit-learn==1.2.0
+kaleido==0.2.1
+scikit-optimize==0.9.0
+pandas==1.5.2
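The pins move the project onto the Optuna 3.0 series. A quick sanity check after pip install -r requirements.txt; this snippet is illustrative and not part of the commit:

import optuna

# The tuner now assumes the 3.x distribution API, so fail fast on a stale 2.x install.
assert optuna.__version__.startswith('3.'), f'need optuna 3.x, found {optuna.__version__}'
print('optuna', optuna.__version__)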
18 changes: 6 additions & 12 deletions tuner.py
@@ -10,7 +10,7 @@

 __author__ = 'fsmosca'
 __script_name__ = 'Optuna Game Parameter Tuner'
-__version__ = 'v5.3.0'
+__version__ = 'v6.0.0'
 __credits__ = ['joergoster', 'musketeerchess', 'optuna']

@@ -27,7 +27,7 @@
 import math

 import optuna
-from optuna.distributions import IntUniformDistribution, DiscreteUniformDistribution
+from optuna.distributions import IntDistribution, FloatDistribution


 logger = logging.getLogger()
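Both removed 2.x classes map onto the unified distributions that this import pulls in. A minimal sketch of the correspondence (the bounds and steps below are invented, not taken from tuner.py):

from optuna.distributions import IntDistribution, FloatDistribution

# optuna 2.x: IntUniformDistribution(low=1, high=10, step=2)
int_dist = IntDistribution(low=1, high=10, log=False, step=2)

# optuna 2.x: DiscreteUniformDistribution(low=0.0, high=1.0, q=0.25)
float_dist = FloatDistribution(low=0.0, high=1.0, log=False, step=0.25)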
@@ -477,9 +477,6 @@ def get_sampler(args_sampler):
             consider_pruned_trials=consider_pruned_trials,
             n_startup_trials=n_startup_trials), n_startup_trials

-    if name == 'botorch':
-        return optuna.integration.BoTorchSampler(), n_startup_trials
-
     logger.exception(f'Error, sampler name "{name}" is not supported, use tpe or cmaes or skopt.')
     raise
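With the experimental BoTorchSampler gone, unknown names now fall straight through to the error. A simplified stand-in for the remaining branches of get_sampler (the signature and kwargs here are assumptions; the real function in tuner.py takes more options):

import optuna

def make_sampler(name, n_startup_trials=10):
    # 'skopt' additionally requires the scikit-optimize package from requirements.txt.
    if name == 'tpe':
        return optuna.samplers.TPESampler(n_startup_trials=n_startup_trials)
    if name == 'cmaes':
        return optuna.samplers.CmaEsSampler(n_startup_trials=n_startup_trials)
    if name == 'skopt':
        return optuna.integration.SkoptSampler()
    raise ValueError(f'sampler "{name}" is not supported, use tpe or cmaes or skopt.')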

@@ -542,7 +539,7 @@ def __call__(self, trial):
                 par_val = round(trial.suggest_float(k, v['min'], v['max'], step=v['step']), 5)
             # Otherwise use integer.
             else:
-                par_val = trial.suggest_int(k, v['min'], v['max'], v['step'])
+                par_val = trial.suggest_int(k, v['min'], v['max'], step=v['step'])
             test_options += f'option.{k}={par_val} '
             self.test_param.update({k: par_val})
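The functional change is only that step is now passed by keyword, matching how the adjacent suggest_float call already does it. A self-contained sketch of the pattern (the objective and parameter names are made up):

import optuna

def objective(trial):
    x = trial.suggest_int('x', 0, 100, step=5)            # step by keyword
    y = round(trial.suggest_float('y', 0.0, 1.0, step=0.25), 5)
    return (x - 50) ** 2 + y

study = optuna.create_study()
study.optimize(objective, n_trials=10)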

@@ -995,10 +992,7 @@ def main():
              ' skopt has also a consider_pruned_trials parameter which is true by default. To not consider pruned trials use:\n'
              ' --sampler name=skopt consider_pruned_trials=false ...\n'
              ' consider_pruned_trials means that during sampling or finding the next best param values, the parameters\n'
-             ' that failed or pruned will be taken into account.\n'
-             '--sampler name=botorch\n'
-             ' A sampler based on BoTorch or Bayesian Optimization in PyTorch.\n'
-             ' Ref.: https://github.com/pytorch/botorch')
+             ' that failed or pruned will be taken into account.')
     parser.add_argument('--threshold-pruner', required=False, nargs='*', action='append',
                         metavar=('result=', 'games='),
                         help='A trial pruner used to prune or stop unpromising trials. Example:\n'
@@ -1145,9 +1139,9 @@ def main():

     for k, v in input_param.items():
         if 'type' in v and v['type'] == 'float':
-            distri.update({k: DiscreteUniformDistribution(v['min'], v['max'], v['step'])})
+            distri.update({k: FloatDistribution(v['min'], v['max'], log=False, step=v['step'])})
         else:
-            distri.update({k: IntUniformDistribution(v['min'], v['max'], v['step'])})
+            distri.update({k: IntDistribution(v['min'], v['max'], log=False, step=v['step'])})

     init_trial = optuna.trial.create_trial(
         params=copy.deepcopy(init_param),
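These distributions are what allow optuna.trial.create_trial to register the user's initial parameters as an already-completed trial. A hedged, self-contained sketch of that seeding pattern (parameter names, bounds, and the objective value are invented):

import copy
import optuna
from optuna.distributions import IntDistribution, FloatDistribution

init_param = {'Depth': 4, 'CPuct': 1.5}  # hypothetical engine options
distri = {
    'Depth': IntDistribution(1, 10, log=False, step=1),
    'CPuct': FloatDistribution(0.5, 3.0, log=False, step=0.25),
}

# Register the starting point as a finished trial so the sampler can learn from it.
init_trial = optuna.trial.create_trial(
    params=copy.deepcopy(init_param),
    distributions=distri,
    value=0.5,
)
study = optuna.create_study()
study.add_trial(init_trial)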
