diff --git a/.gitignore b/.gitignore index 184c27d1..b9724478 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ x86_64 /cov_reports .coverage coverage.xml +.idea/ \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index ac52d7ce..3dfa2f2c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2020, EPFL/Blue Brain Project +# Copyright (c) 2016-2022, EPFL/Blue Brain Project # # This file is part of BluePyOpt # diff --git a/LICENSE.txt b/LICENSE.txt index 4e39d3be..18dc09dd 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -6,7 +6,7 @@ Examples and test are BSD-licensed. External dependencies are either LGPL or BSD-licensed. See file ACKNOWLEDGEMENTS.txt and AUTHORS.txt for further details. -Copyright (c) Blue Brain Project/EPFL 2016-2021. +Copyright (c) Blue Brain Project/EPFL 2016-2022. This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the diff --git a/README.rst b/README.rst index 56e083c8..484543e7 100644 --- a/README.rst +++ b/README.rst @@ -198,7 +198,7 @@ Funding This work has been partially funded by the European Union Seventh Framework Program (FP7/2007­2013) under grant agreement no. 604102 (HBP), the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreement No. 720270, 785907 (Human Brain Project SGA1/SGA2) and by the EBRAINS research infrastructure, funded from the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreement No. 945539 (Human Brain Project SGA3). This project/research was supported by funding to the Blue Brain Project, a research center of the École polytechnique fédérale de Lausanne (EPFL), from the Swiss government’s ETH Board of the Swiss Federal Institutes of Technology. -Copyright (c) 2016-2021 Blue Brain Project/EPFL +Copyright (c) 2016-2022 Blue Brain Project/EPFL .. The following image is also defined in the index.rst file, as the relative path is diff --git a/bluepyopt/__init__.py b/bluepyopt/__init__.py index 85e44790..ea045732 100644 --- a/bluepyopt/__init__.py +++ b/bluepyopt/__init__.py @@ -1,7 +1,7 @@ """Init script""" """ -Copyright (c) 2016-2020, EPFL/Blue Brain Project +Copyright (c) 2016-2022, EPFL/Blue Brain Project This file is part of BluePyOpt @@ -32,6 +32,7 @@ import bluepyopt.deapext.algorithms import bluepyopt.stoppingCriteria import bluepyopt.deapext.optimisations +import bluepyopt.deapext.optimisationsCMA # Add some backward compatibility for the time when DEAPoptimisation not in # deapext yet diff --git a/bluepyopt/deapext/CMA_MO.py b/bluepyopt/deapext/CMA_MO.py new file mode 100644 index 00000000..e8b650ea --- /dev/null +++ b/bluepyopt/deapext/CMA_MO.py @@ -0,0 +1,247 @@ +"""Multi Objective CMA-es class""" + +""" +Copyright (c) 2016-2022, EPFL/Blue Brain Project + + This file is part of BluePyOpt + + This library is free software; you can redistribute it and/or modify it under + the terms of the GNU Lesser General Public License version 3.0 as published + by the Free Software Foundation. + + This library is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more + details. 
+ + You should have received a copy of the GNU Lesser General Public License + along with this library; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +""" + +# pylint: disable=R0912, R0914 + +import logging +import numpy +import copy +from math import log + +import deap +from deap import base +from deap import cma + +from .stoppingCriteria import MaxNGen +from . import utils +from . import hype + +logger = logging.getLogger("__main__") + + +def get_hyped(pop, ubound_score=250., threshold_improvement=240.): + """Compute the hypervolume contribution of each individual. + The fitness space is first bounded and all dimension who do not show + improvement are ignored. + """ + + # Cap the obj at 250 + points = numpy.array([ind.fitness.values for ind in pop]) + points[points > ubound_score] = ubound_score + lbounds = numpy.min(points, axis=0) + ubounds = numpy.max(points, axis=0) + + # Remove the dimensions that do not show any improvement + to_remove = [] + for i, lb in enumerate(lbounds): + if lb >= threshold_improvement: + to_remove.append(i) + points = numpy.delete(points, to_remove, axis=1) + lbounds = numpy.delete(lbounds, to_remove) + ubounds = numpy.delete(ubounds, to_remove) + + if not len(lbounds): + logger.warning("No dimension along which to compute the hypervolume.") + return [0.] * len(pop) + + # Rescale the objective space + # Note: 2 here is a magic number used to make the hypercube larger than it + # really is. It makes sure that the individual always have a non-zero + # hyper-volume contribution and improves the results while avoiding an + # edge case. + points = (points - lbounds) / numpy.max(ubounds.flatten()) + ubounds = numpy.max(points, axis=0) + 2.0 + + hv = hype.hypeIndicatorSampled( + points=points, bounds=ubounds, k=5, nrOfSamples=200000 + ) + return hv + + +class CMA_MO(cma.StrategyMultiObjective): + """Multiple objective covariance matrix adaption""" + + def __init__( + self, + centroids, + offspring_size, + sigma, + max_ngen, + IndCreator, + RandIndCreator, + weight_hv=0.5, + map_function=None, + use_scoop=False, + ): + """Constructor + + Args: + centroid (list): initial guess used as the starting point of + the CMA-ES + offspring_size (int): number of offspring individuals in each + generation + sigma (float): initial standard deviation of the distribution + max_ngen (int): total number of generation to run + IndCreator (fcn): function returning an individual of the pop + RandIndCreator (fcn): function creating a random individual. + weight_hv (float): between 0 and 1. Weight given to the + hypervolume contribution when computing the score of an + individual in MO-CMA. The weight of the fitness contribution + is computed as 1 - weight_hv. 
+ map_function (map): function used to map (parallelize) the + evaluation function calls + use_scoop (bool): use scoop map for parallel computation + """ + + if offspring_size is None: + lambda_ = int(4 + 3 * log(len(RandIndCreator()))) + else: + lambda_ = offspring_size + + if centroids is None: + starters = [RandIndCreator() for i in range(lambda_)] + else: + if len(centroids) != lambda_: + from itertools import cycle + + generator = cycle(centroids) + starters = [ + copy.deepcopy(next(generator)) for i in range(lambda_) + ] + else: + starters = centroids + + cma.StrategyMultiObjective.__init__( + self, starters, sigma, mu=int(lambda_ * 0.5), lambda_=lambda_ + ) + + self.population = [] + self.problem_size = len(starters[0]) + + self.weight_hv = weight_hv + + self.map_function = map_function + self.use_scoop = use_scoop + + # Toolbox specific to this CMA-ES + self.toolbox = base.Toolbox() + self.toolbox.register("generate", self.generate, IndCreator) + self.toolbox.register("update", self.update) + + if self.use_scoop: + if self.map_function: + raise Exception( + "Impossible to use scoop and provide self defined map " + "function: %s" % self.map_function + ) + from scoop import futures + + self.map_function = futures.map + + # Set termination conditions + self.active = True + if max_ngen <= 0: + max_ngen = 100 + 50 * (self.problem_size + 3) ** 2 / numpy.sqrt( + self.lambda_ + ) + + self.stopping_conditions = [MaxNGen(max_ngen)] + + def _select(self, candidates): + """Select the best candidates of the population + + Fill the next population (chosen) with the Pareto fronts until there + is not enough space. When an entire front does not fit in the space + left we rely on a mixture of hypervolume and fitness. The respective + weights of hypervolume and fitness are "hv" and "1-hv". 
The remaining + fronts are explicitly not chosen""" + + if self.weight_hv == 0.0: + fit = [numpy.sum(ind.fitness.values) for ind in candidates] + idx_scores = list(numpy.argsort(fit)) + + elif self.weight_hv == 1.0: + hv = get_hyped(candidates) + idx_scores = list(numpy.argsort(hv))[::-1] + + else: + hv = get_hyped(candidates) + idx_hv = list(numpy.argsort(hv))[::-1] + fit = [numpy.sum(ind.fitness.values) for ind in candidates] + idx_fit = list(numpy.argsort(fit)) + scores = [] + for i in range(len(candidates)): + score = (self.weight_hv * idx_hv.index(i)) + ( + (1.0 - self.weight_hv) * idx_fit.index(i) + ) + scores.append(score) + idx_scores = list(numpy.argsort(scores)) + + chosen = [candidates[i] for i in idx_scores[: self.mu]] + not_chosen = [candidates[i] for i in idx_scores[self.mu:]] + return chosen, not_chosen + + def get_population(self, to_space): + """Returns the population in the original parameter space""" + pop = copy.deepcopy(self.population) + for i, ind in enumerate(pop): + for j, v in enumerate(ind): + pop[i][j] = to_space[j](v) + return pop + + def get_parents(self, to_space): + """Returns the population in the original parameter space""" + pop = copy.deepcopy(self.parents) + for i, ind in enumerate(pop): + for j, v in enumerate(ind): + pop[i][j] = to_space[j](v) + return pop + + def generate_new_pop(self, lbounds, ubounds): + """Generate a new population bounded in the normalized space""" + self.population = self.toolbox.generate() + return utils.bound(self.population, lbounds, ubounds) + + def update_strategy(self): + self.toolbox.update(self.population) + + def set_fitness(self, fitnesses): + for f, ind in zip(fitnesses, self.population): + ind.fitness.values = f + + def set_fitness_parents(self, fitnesses): + for f, ind in zip(fitnesses, self.parents): + ind.fitness.values = f + + def check_termination(self, gen): + stopping_params = { + "gen": gen, + "population": self.population, + } + + [c.check(stopping_params) for c in self.stopping_conditions] + for c in self.stopping_conditions: + if c.criteria_met: + logger.info( + "CMA stopped because of termination criteria: " + + " ".join(c.name) + ) + self.active = False diff --git a/bluepyopt/deapext/CMA_SO.py b/bluepyopt/deapext/CMA_SO.py new file mode 100644 index 00000000..6b5c5e91 --- /dev/null +++ b/bluepyopt/deapext/CMA_SO.py @@ -0,0 +1,225 @@ +"""Single Objective CMA-es class""" + +""" +Copyright (c) 2016-2022, EPFL/Blue Brain Project + + This file is part of BluePyOpt + + This library is free software; you can redistribute it and/or modify it under + the terms of the GNU Lesser General Public License version 3.0 as published + by the Free Software Foundation. + + This library is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more + details. + + You should have received a copy of the GNU Lesser General Public License + along with this library; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +""" + +# pylint: disable=R0912, R0914 + +import logging +import numpy +from math import sqrt, log +import copy + +from deap import base +from deap import cma + +from .stoppingCriteria import ( + MaxNGen, + Stagnation, + TolHistFun, + EqualFunVals, + NoEffectAxis, + TolUpSigma, + TolX, + ConditionCov, + NoEffectCoor, +) + +from . 
import utils + +logger = logging.getLogger("__main__") + + +class CMA_SO(cma.Strategy): + """Single objective covariance matrix adaption""" + + def __init__( + self, + centroids, + offspring_size, + sigma, + max_ngen, + IndCreator, + RandIndCreator, + map_function=None, + use_scoop=False, + ): + """Constructor + + Args: + centroid (list): initial guess used as the starting point of + the CMA-ES + offspring_size (int): number of offspring individuals in each + generation + sigma (float): initial standard deviation of the distribution + max_ngen (int): total number of generation to run + IndCreator (fcn): function returning an individual of the pop + RandIndCreator (fcn): function creating a random individual. + map_function (map): function used to map (parallelize) the + evaluation function calls + use_scoop (bool): use scoop map for parallel computation + """ + + if offspring_size is None: + lambda_ = int(4 + 3 * log(len(RandIndCreator()))) + else: + lambda_ = offspring_size + + if centroids is None: + starter = RandIndCreator() + else: + starter = centroids[0] + + cma.Strategy.__init__(self, starter, sigma, lambda_=lambda_) + + self.population = [] + self.problem_size = len(starter) + + self.map_function = map_function + self.use_scoop = use_scoop + + # Toolbox specific to this CMA-ES + self.toolbox = base.Toolbox() + self.toolbox.register("generate", self.generate, IndCreator) + self.toolbox.register("update", self.update) + + # Set termination conditions + self.active = True + if max_ngen <= 0: + max_ngen = 100 + 50 * (self.problem_size + 3) ** 2 / numpy.sqrt( + self.lambda_ + ) + + self.stopping_conditions = [ + MaxNGen(max_ngen), + Stagnation(self.lambda_, self.problem_size), + TolHistFun(self.lambda_, self.problem_size), + EqualFunVals(self.lambda_, self.problem_size), + NoEffectAxis(self.problem_size), + TolUpSigma(float(self.sigma)), + TolX(), + ConditionCov(), + NoEffectCoor(), + ] + + def update(self, population): + """Update the current covariance matrix strategy from the + population""" + + population.sort(key=lambda ind: ind.fitness.weighted_reduce, + reverse=True) + + old_centroid = self.centroid + self.centroid = numpy.dot(self.weights, population[0:self.mu]) + + c_diff = self.centroid - old_centroid + + # Cumulation : update evolution path + self.ps = (1 - self.cs) * self.ps + sqrt( + self.cs * (2 - self.cs) * self.mueff + ) / self.sigma * numpy.dot( + self.B, (1.0 / self.diagD) * numpy.dot(self.B.T, c_diff) + ) # noqa + + hsig = float( + ( + numpy.linalg.norm(self.ps) + / sqrt(1.0 - (1.0 - self.cs) ** + (2.0 * (self.update_count + 1.0))) + / self.chiN + < (1.4 + 2.0 / (self.dim + 1.0)) + ) + ) # noqa + + self.update_count += 1 + + self.pc = (1 - self.cc) * self.pc + hsig * sqrt( + self.cc * (2 - self.cc) * self.mueff + ) / self.sigma * c_diff + + # Update covariance matrix + artmp = population[0:self.mu] - old_centroid + self.C = ( + ( + 1 + - self.ccov1 + - self.ccovmu + + (1 - hsig) * self.ccov1 * self.cc * (2 - self.cc) + ) + * self.C + + self.ccov1 * numpy.outer(self.pc, self.pc) + + self.ccovmu * numpy.dot((self.weights * artmp.T), artmp) + / self.sigma ** 2 + ) + + self.sigma *= numpy.exp( + (numpy.linalg.norm(self.ps) / self.chiN - 1.0) * self.cs + / self.damps + ) + + self.diagD, self.B = numpy.linalg.eigh(self.C) + indx = numpy.argsort(self.diagD) + + self.cond = self.diagD[indx[-1]] / self.diagD[indx[0]] + + self.diagD = self.diagD[indx] ** 0.5 + self.B = self.B[:, indx] + self.BD = self.B * self.diagD + + def get_population(self, to_space): + """Returns the 
population in the original parameter space""" + pop = copy.deepcopy(self.population) + for i, ind in enumerate(pop): + for j, v in enumerate(ind): + pop[i][j] = to_space[j](v) + return pop + + def generate_new_pop(self, lbounds, ubounds): + """Generate a new population bounded in the normalized space""" + self.population = self.toolbox.generate() + return utils.bound(self.population, lbounds, ubounds) + + def update_strategy(self): + self.toolbox.update(self.population) + + def set_fitness(self, fitnesses): + for f, ind in zip(fitnesses, self.population): + ind.fitness.values = f + + def check_termination(self, gen): + stopping_params = { + "gen": gen, + "population": self.population, + "centroid": self.centroid, + "pc": self.pc, + "C": self.C, + "B": self.B, + "sigma": self.sigma, + "diagD": self.diagD, + "cond": self.cond, + } + + [c.check(stopping_params) for c in self.stopping_conditions] + for c in self.stopping_conditions: + if c.criteria_met: + logger.info( + "CMA stopped because of termination criteria: " + + " ".join(c.name) + ) + self.active = False diff --git a/bluepyopt/deapext/algorithms.py b/bluepyopt/deapext/algorithms.py index c309cab7..9cfe7f12 100644 --- a/bluepyopt/deapext/algorithms.py +++ b/bluepyopt/deapext/algorithms.py @@ -1,7 +1,7 @@ """Optimisation class""" """ -Copyright (c) 2016-2020, EPFL/Blue Brain Project +Copyright (c) 2016-2022, EPFL/Blue Brain Project This file is part of BluePyOpt @@ -32,6 +32,7 @@ import pickle from .stoppingCriteria import MaxNGen +from . import utils logger = logging.getLogger('__main__') @@ -63,23 +64,6 @@ def _evaluate_invalid_fitness(toolbox, population): return len(invalid_ind) -def _update_history_and_hof(halloffame, history, population): - '''Update the hall of fame with the generated individuals - - Note: History and Hall-of-Fame behave like dictionaries - ''' - if halloffame is not None: - halloffame.update(population) - - history.update(population) - - -def _record_stats(stats, logbook, gen, population, invalid_count): - '''Update the statistics with the new population''' - record = stats.compile(population) if stats is not None else {} - logbook.record(gen=gen, nevals=invalid_count, **record) - - def _get_offspring(parents, toolbox, cxpb, mutpb): '''return the offspring, use toolbox.variate if possible''' if hasattr(toolbox, 'variate'): @@ -109,7 +93,9 @@ def eaAlphaMuPlusLambdaCheckpoint( halloffame=None, cp_frequency=1, cp_filename=None, - continue_cp=False): + continue_cp=False, + terminator=None, + param_names=None): r"""This is the :math:`(~\alpha,\mu~,~\lambda)` evolutionary algorithm Args: @@ -124,8 +110,14 @@ def eaAlphaMuPlusLambdaCheckpoint( cp_frequency(int): generations between checkpoints cp_filename(string): path to checkpoint filename continue_cp(bool): whether to continue + terminator (multiprocessing.Event): exit loop when is set. + Not taken into account if None. 
+ param_names(list): names of the parameters optimized by the evaluator """ + if param_names is None: + param_names = [] + if cp_filename: cp_filename_tmp = cp_filename + '.tmp' @@ -156,22 +148,27 @@ def eaAlphaMuPlusLambdaCheckpoint( history = deap.tools.History() invalid_count = _evaluate_invalid_fitness(toolbox, population) - _update_history_and_hof(halloffame, history, population) - _record_stats(stats, logbook, start_gen, population, invalid_count) + + utils.update_history_and_hof(halloffame, history, population) + utils.record_stats( + stats, logbook, start_gen, population, invalid_count + ) stopping_criteria = [MaxNGen(ngen)] # Begin the generational process gen = start_gen + 1 stopping_params = {"gen": gen} - while not(_check_stopping_criteria(stopping_criteria, stopping_params)): + while utils.run_next_gen( + not(_check_stopping_criteria(stopping_criteria, stopping_params)), + terminator): offspring = _get_offspring(parents, toolbox, cxpb, mutpb) population = parents + offspring invalid_count = _evaluate_invalid_fitness(toolbox, offspring) - _update_history_and_hof(halloffame, history, population) - _record_stats(stats, logbook, gen, population, invalid_count) + utils.update_history_and_hof(halloffame, history, population) + utils.record_stats(stats, logbook, gen, population, invalid_count) # Select the next generation parents parents = toolbox.select(population, mu) @@ -186,7 +183,8 @@ def eaAlphaMuPlusLambdaCheckpoint( halloffame=halloffame, history=history, logbook=logbook, - rndstate=random.getstate()) + rndstate=random.getstate(), + param_names=param_names) pickle.dump(cp, open(cp_filename_tmp, "wb")) if os.path.isfile(cp_filename_tmp): shutil.copy(cp_filename_tmp, cp_filename) diff --git a/bluepyopt/deapext/hype.py b/bluepyopt/deapext/hype.py new file mode 100644 index 00000000..00163c35 --- /dev/null +++ b/bluepyopt/deapext/hype.py @@ -0,0 +1,120 @@ +""" +Copyright (c) 2016-2022, EPFL/Blue Brain Project + + This file is part of BluePyOpt + + This library is free software; you can redistribute it and/or modify it under + the terms of the GNU Lesser General Public License version 3.0 as published + by the Free Software Foundation. + + This library is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more + details. + + You should have received a copy of the GNU Lesser General Public License + along with this library; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +""" +import numpy + + +def hypesub(la, A, actDim, bounds, pvec, alpha, k): + """HypE algorithm sub function""" + + h = numpy.zeros(la) + i = numpy.argsort(A[:, actDim - 1]) + S = A[i] + pvec = pvec[i] + + for i in range(1, S.shape[0] + 1): + + if i < S.shape[0]: + extrusion = S[i, actDim - 1] - S[i - 1, actDim - 1] + else: + extrusion = bounds[actDim - 1] - S[i - 1, actDim - 1] + + if actDim == 1: + if i > k: + break + if alpha[i - 1] >= 0: + h[pvec[0:i]] += extrusion * alpha[i - 1] + elif extrusion > 0.0: + h += extrusion * hypesub( + la, S[0:i, :], actDim - 1, bounds, pvec[0:i], alpha, k + ) + + return h + + +def hypeIndicatorExact(points, bounds, k): + """HypE algorithm. 
Python implementation of the Matlab code available at + https://sop.tik.ee.ethz.ch/download/supplementary/hype/ + + Args: + points(array): 2D array containing the objective values of the + population + bounds(array): 1D array containing the reference point from which to + compute the hyper-volume + k(int): HypE parameter + """ + + Ps = points.shape[0] + if k < 0: + k = Ps + actDim = points.shape[1] + pvec = numpy.arange(points.shape[0]) + + alpha = [] + for i in range(1, k + 1): + j = numpy.arange(1, i) + alpha.append(numpy.prod((k - j) / (Ps - j) / i)) + alpha = numpy.asarray(alpha) + + return hypesub(points.shape[0], points, actDim, bounds, pvec, alpha, k) + + +def hypeIndicatorSampled(points, bounds, k, nrOfSamples): + """Monte-Carlo approximation of the HypE algorithm. Python implementation + of the Matlab code available at + https://sop.tik.ee.ethz.ch/download/supplementary/hype/ + + Args: + points(array): 2D array containing the objective values of the + population + bounds(array): 1D array containing the reference point from which to + compute the hyper-volume + k(int): HypE parameter + nrOfSamples(int): number of random samples to use for the + Monte-Carlo approximation + """ + + nrP = points.shape[0] + dim = points.shape[1] + F = numpy.zeros(nrP) + + BoxL = numpy.min(points, axis=0) + + alpha = [] + for i in range(1, k + 1): + j = numpy.arange(1, i) + alpha.append(numpy.prod((k - j) / (nrP - j) / i)) + alpha = numpy.asarray(alpha + [0.0] * nrP) + + S = numpy.random.uniform(low=BoxL, high=bounds, size=(nrOfSamples, dim)) + + dominated = numpy.zeros(nrOfSamples, dtype="uint") + for j in range(1, nrP + 1): + B = S - points[j - 1] + ind = numpy.sum(B >= 0, axis=1) == dim + dominated[ind] += 1 + + for j in range(1, nrP + 1): + B = S - points[j - 1] + ind = numpy.sum(B >= 0, axis=1) == dim + x = dominated[ind] + F[j - 1] = numpy.sum(alpha[x - 1]) + + F = F * numpy.prod(bounds - BoxL) / nrOfSamples + + return F diff --git a/bluepyopt/deapext/optimisations.py b/bluepyopt/deapext/optimisations.py index 9af98860..8630bc62 100644 --- a/bluepyopt/deapext/optimisations.py +++ b/bluepyopt/deapext/optimisations.py @@ -1,7 +1,7 @@ """Optimisation class""" """ -Copyright (c) 2016-2020, EPFL/Blue Brain Project +Copyright (c) 2016-2022, EPFL/Blue Brain Project This file is part of BluePyOpt @@ -33,6 +33,7 @@ from . import algorithms from . import tools +from . import utils import bluepyopt.optimisations @@ -106,6 +107,7 @@ def __init__(self, evaluator=None, Args: evaluator (Evaluator): Evaluator object + use_scoop (bool): use scoop map for parallel computation seed (float): Random number generator seed offspring_size (int): Number of offspring individuals in each generation @@ -159,6 +161,11 @@ def setup_deap(self): # Number of parameters IND_SIZE = len(self.evaluator.params) + if IND_SIZE == 0: + raise ValueError( + "Length of evaluator.params is zero. At least one " + "non-fix parameter is needed to run an optimization." 
+ ) # Bounds for the parameters @@ -169,19 +176,9 @@ def setup_deap(self): LOWER.append(parameter.lower_bound) UPPER.append(parameter.upper_bound) - # Define a function that will uniformly pick an individual - def uniform(lower_list, upper_list, dimensions): - """Fill array """ - - if hasattr(lower_list, '__iter__'): - return [random.uniform(lower, upper) for lower, upper in - zip(lower_list, upper_list)] - else: - return [random.uniform(lower_list, upper_list) - for _ in range(dimensions)] - # Register the 'uniform' function - self.toolbox.register("uniformparams", uniform, LOWER, UPPER, IND_SIZE) + self.toolbox.register("uniformparams", utils.uniform, LOWER, UPPER, + IND_SIZE) # Register the individual format # An indiviual is create by WSListIndividual and parameters @@ -233,12 +230,9 @@ def uniform(lower_list, upper_list, dimensions): raise ValueError('DEAPOptimisation: Constructor selector_name ' 'argument only accepts "IBEA" or "NSGA2"') - def _reduce_method(meth): - """Overwrite reduce""" - return (getattr, (meth.__self__, meth.__func__.__name__)) import copyreg import types - copyreg.pickle(types.MethodType, _reduce_method) + copyreg.pickle(types.MethodType, utils.reduce_method) if self.use_scoop: if self.map_function: @@ -259,7 +253,8 @@ def run(self, continue_cp=False, cp_filename=None, cp_frequency=1, - parent_population=None): + parent_population=None, + terminator=None): """Run optimisation""" # Allow run function to override offspring_size # TODO probably in the future this should not be an object field @@ -303,6 +298,10 @@ def run(self, stats.register("min", numpy.min) stats.register("max", numpy.max) + param_names = [] + if hasattr(self.evaluator, "param_names"): + param_names = self.evaluator.param_names + pop, hof, log, history = algorithms.eaAlphaMuPlusLambdaCheckpoint( pop, self.toolbox, @@ -314,7 +313,9 @@ def run(self, halloffame=self.hof, cp_frequency=cp_frequency, continue_cp=continue_cp, - cp_filename=cp_filename) + cp_filename=cp_filename, + terminator=terminator, + param_names=param_names) # Update hall of fame self.hof = hof diff --git a/bluepyopt/deapext/optimisationsCMA.py b/bluepyopt/deapext/optimisationsCMA.py new file mode 100644 index 00000000..0e036fea --- /dev/null +++ b/bluepyopt/deapext/optimisationsCMA.py @@ -0,0 +1,375 @@ +"""CMA Optimisation class""" + +""" +Copyright (c) 2016-2022, EPFL/Blue Brain Project + + This file is part of BluePyOpt + + This library is free software; you can redistribute it and/or modify it under + the terms of the GNU Lesser General Public License version 3.0 as published + by the Free Software Foundation. + + This library is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more + details. + + You should have received a copy of the GNU Lesser General Public License + along with this library; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +""" + +import logging +import numpy +import pickle +import random +import functools +import shutil +import os + +import deap.tools + +from .CMA_SO import CMA_SO +from .CMA_MO import CMA_MO +from . 
import utils + +import bluepyopt.optimisations + +logger = logging.getLogger("__main__") + + +def _ind_convert_space(ind, convert_fcn): + """util function to pass the individual from normalized to real space and + inversely""" + + return [f(x) for f, x in zip(convert_fcn, ind)] + + +class DEAPOptimisationCMA(bluepyopt.optimisations.Optimisation): + + """Optimisation class for CMA-based evolution strategies""" + + def __init__( + self, + evaluator=None, + use_scoop=False, + seed=1, + offspring_size=None, + centroids=None, + sigma=0.4, + map_function=None, + hof=None, + selector_name="single_objective", + weight_hv=0.5, + fitness_reduce=numpy.sum, + ): + """Constructor + + Args: + evaluator (Evaluator): Evaluator object + use_scoop (bool): use scoop map for parallel computation + seed (float): Random number generator seed + offspring_size (int): Number of offspring individuals in each + generation + centroids (list): list of initial guesses used as the starting + points of the CMA-ES + sigma (float): initial standard deviation of the distribution + map_function (function): Function used to map (parallelize) the + evaluation function calls + hof (hof): Hall of Fame object + selector_name (str): The selector used in the evolutionary + algorithm, possible values are 'single_objective' or + 'multi_objective' + weight_hv (float): between 0 and 1. Weight given to the + hyper-volume contribution when computing the score of an + individual in MO-CMA. The weight of the fitness contribution + is computed as 1 - weight_hv. + fitness_reduce (fcn): function used to reduce the objective values + to a single fitness score + """ + + super(DEAPOptimisationCMA, self).__init__(evaluator=evaluator) + + self.use_scoop = use_scoop + self.seed = seed + self.map_function = map_function + + self.hof = hof + if self.hof is None: + self.hof = deap.tools.HallOfFame(10) + + self.offspring_size = offspring_size + + self.fitness_reduce = fitness_reduce + self.centroids = centroids + self.sigma = sigma + + if weight_hv > 1.0 or weight_hv < 0.0: + raise Exception("weight_hv has to be between 0 and 1.") + self.weight_hv = weight_hv + + self.selector_name = selector_name + if self.selector_name == "single_objective": + self.cma_creator = CMA_SO + elif self.selector_name == "multi_objective": + self.cma_creator = CMA_MO + else: + raise Exception( + "The selector_name has to be 'single_objective' " + "or 'multi_objective'. Not " + "{}".format(self.selector_name) + ) + + # Number of objective values + self.problem_size = len(self.evaluator.params) + + # Number of parameters + self.ind_size = len(self.evaluator.objectives) + + # Create a DEAP toolbox + self.toolbox = deap.base.Toolbox() + + # Bounds for the parameters + self.lbounds = [p.lower_bound for p in self.evaluator.params] + self.ubounds = [p.upper_bound for p in self.evaluator.params] + + # Instantiate functions converting individuals from the original + # parameter space to (and from) a normalized space bounded to [-1.;1] + self.ubounds = numpy.asarray(self.ubounds) + self.lbounds = numpy.asarray(self.lbounds) + bounds_radius = (self.ubounds - self.lbounds) / 2.0 + bounds_mean = (self.ubounds + self.lbounds) / 2.0 + + self.to_norm = [] + self.to_space = [] + for r, m in zip(bounds_radius, bounds_mean): + self.to_norm.append( + functools.partial( + lambda param, bm, br: (param - bm) / br, + bm=m, + br=r) + ) + self.to_space.append( + functools.partial( + lambda param, bm, br: (param * br) + bm, + bm=m, + br=r + ) + ) + + # Overwrite the bounds with -1. and 1. 
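+ # Note: from this point on the optimisation operates in the normalized
+ # [-1, 1] space; self.to_norm and self.to_space (defined just above from
+ # bounds_mean and bounds_radius) convert individuals between the original
+ # parameter bounds and this normalized space.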
+ self.lbounds = numpy.full(self.problem_size, -1.0) + self.ubounds = numpy.full(self.problem_size, 1.0) + + self.setup_deap() + + # In case initial guesses were provided, rescale them to the norm space + if self.centroids is not None: + self.centroids = [ + self.toolbox.Individual(_ind_convert_space(ind, self.to_norm)) + for ind in centroids + ] + + def setup_deap(self): + """Set up optimisation""" + + # Set random seed + random.seed(self.seed) + numpy.random.seed(self.seed) + + # Register the 'uniform' function + self.toolbox.register( + "uniformparams", + utils.uniform, + self.lbounds, + self.ubounds, + self.ind_size + ) + + # Register the individual format + self.toolbox.register( + "Individual", + functools.partial( + utils.WSListIndividual, + obj_size=self.ind_size, + reduce_fcn=self.fitness_reduce, + ), + ) + + # A Random Individual is created by ListIndividual and parameters are + # initially picked by 'uniform' + self.toolbox.register( + "RandomInd", + deap.tools.initIterate, + self.toolbox.Individual, + self.toolbox.uniformparams, + ) + + # Register the population format. It is a list of individuals + self.toolbox.register( + "population", deap.tools.initRepeat, list, self.toolbox.RandomInd + ) + + # Register the evaluation function for the individuals + self.toolbox.register("evaluate", self.evaluator.evaluate_with_lists) + + import copyreg + import types + + copyreg.pickle(types.MethodType, utils.reduce_method) + + if self.use_scoop: + if self.map_function: + raise Exception( + "Impossible to use scoop is providing self defined map " + "function: %s" % self.map_function + ) + + from scoop import futures + + self.toolbox.register("map", futures.map) + + elif self.map_function: + self.toolbox.register("map", self.map_function) + + def run( + self, + max_ngen=0, + cp_frequency=1, + continue_cp=False, + cp_filename=None, + terminator=None, + ): + """ Run the optimizer until a stopping criteria is met. + + Args: + max_ngen(int): Total number of generation to run + cp_frequency(int): generations between checkpoints + continue_cp(bool): whether to continue + cp_filename(string): path to checkpoint filename + terminator (multiprocessing.Event): exit loop when is set. + Not taken into account if None. 
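+
+ Returns:
+ The final population, the hall of fame, the logbook and the history.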
+ """ + + if cp_filename: + cp_filename_tmp = cp_filename + '.tmp' + + stats = self.get_stats() + + if continue_cp: + + # A file name has been given, then load the data from the file + cp = pickle.load(open(cp_filename, "rb")) + gen = cp["generation"] + self.hof = cp["halloffame"] + logbook = cp["logbook"] + history = cp["history"] + random.setstate(cp["rndstate"]) + numpy.random.set_state(cp["np_rndstate"]) + CMA_es = cp["CMA_es"] + CMA_es.map_function = self.map_function + + else: + + history = deap.tools.History() + logbook = deap.tools.Logbook() + logbook.header = ["gen", "nevals"] + stats.fields + + # Instantiate the CMA strategy centered on the centroids + CMA_es = self.cma_creator( + centroids=self.centroids, + offspring_size=self.offspring_size, + sigma=self.sigma, + max_ngen=max_ngen, + IndCreator=self.toolbox.Individual, + RandIndCreator=self.toolbox.RandomInd, + map_function=self.map_function, + use_scoop=self.use_scoop, + ) + + if self.selector_name == "multi_objective": + CMA_es.weight_hv = self.weight_hv + to_evaluate = CMA_es.get_parents(self.to_space) + fitness = self.toolbox.map(self.toolbox.evaluate, to_evaluate) + fitness = list(map(list, fitness)) + CMA_es.set_fitness_parents(fitness) + + gen = 1 + + pop = CMA_es.get_population(self.to_space) + + param_names = [] + if hasattr(self.evaluator, "param_names"): + param_names = self.evaluator.param_names + + # Run until a termination criteria is met + while utils.run_next_gen(CMA_es.active, terminator): + logger.info("Generation {}".format(gen)) + + # Generate the new populations + n_out = CMA_es.generate_new_pop( + lbounds=self.lbounds, ubounds=self.ubounds + ) + logger.debug( + "Number of individuals outside of bounds: {} ({:.2f}%)".format( + n_out, + 100.0 * n_out / len(CMA_es.population) + ) + ) + + # Get all the individuals in the original space for evaluation + to_evaluate = CMA_es.get_population(self.to_space) + + # Compute the fitness + fitness = self.toolbox.map(self.toolbox.evaluate, to_evaluate) + fitness = list(map(list, fitness)) + nevals = len(to_evaluate) + CMA_es.set_fitness(fitness) + + # Update the hall of fame, history and logbook + pop = CMA_es.get_population(self.to_space) + utils.update_history_and_hof(self.hof, history, pop) + record = utils.record_stats(stats, logbook, gen, pop, nevals) + logger.info(logbook.stream) + + # Update the CMA strategy using the new fitness and check if + # termination conditions were reached + CMA_es.update_strategy() + CMA_es.check_termination(gen) + + if cp_filename and cp_frequency and gen % cp_frequency == 0: + + # Map function shouldn't be pickled + temp_mf = CMA_es.map_function + CMA_es.map_function = None + + cp = dict( + population=pop, + generation=gen, + halloffame=self.hof, + history=history, + logbook=logbook, + rndstate=random.getstate(), + np_rndstate=numpy.random.get_state(), + CMA_es=CMA_es, + param_names=param_names, + ) + pickle.dump(cp, open(cp_filename_tmp, "wb")) + if os.path.isfile(cp_filename_tmp): + shutil.copy(cp_filename_tmp, cp_filename) + logger.debug('Wrote checkpoint to %s', cp_filename) + + CMA_es.map_function = temp_mf + + gen += 1 + + return pop, self.hof, logbook, history + + def get_stats(self): + """Get the stats that will be saved during optimisation""" + stats = deap.tools.Statistics(key=lambda ind: ind.fitness.reduce) + stats.register("avg", numpy.mean) + stats.register("std", numpy.std) + stats.register("min", numpy.min) + stats.register("max", numpy.max) + return stats diff --git a/bluepyopt/deapext/stoppingCriteria.py 
b/bluepyopt/deapext/stoppingCriteria.py index ad78ac65..1b0db35b 100644 --- a/bluepyopt/deapext/stoppingCriteria.py +++ b/bluepyopt/deapext/stoppingCriteria.py @@ -1,15 +1,19 @@ """StoppingCriteria class""" """ -Copyright (c) 2016-2020, EPFL/Blue Brain Project +Copyright (c) 2016-2022, EPFL/Blue Brain Project + This file is part of BluePyOpt + This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 3.0 as published by the Free Software Foundation. + This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. + You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. @@ -20,13 +24,20 @@ import logging import numpy +from collections import deque + import bluepyopt.stoppingCriteria -logger = logging.getLogger('__main__') +logger = logging.getLogger("__main__") + + +def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): + return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) class MaxNGen(bluepyopt.stoppingCriteria.StoppingCriteria): """Max ngen stopping criteria class""" + name = "Max ngen" def __init__(self, max_ngen): @@ -37,6 +48,215 @@ def __init__(self, max_ngen): def check(self, kwargs): """Check if the maximum number of iteration is reached""" gen = kwargs.get("gen") - if gen > self.max_ngen: self.criteria_met = True + + +class Stagnation(bluepyopt.stoppingCriteria.StoppingCriteria): + """Stagnation stopping criteria class""" + + name = "Stagnation" + + def __init__(self, lambda_, problem_size): + """Constructor""" + super(Stagnation, self).__init__() + + self.lambda_ = lambda_ + self.problem_size = problem_size + self.stagnation_iter = None + + self.best = [] + self.median = [] + + def check(self, kwargs): + """Check if the population stopped improving""" + ngen = kwargs.get("gen") + population = kwargs.get("population") + fitness = [ind.fitness.reduce for ind in population] + fitness.sort() + + self.best.append(fitness[0]) + self.median.append(fitness[int(round(len(fitness) / 2.0))]) + self.stagnation_iter = int( + numpy.ceil( + 0.2 * ngen + 120 + 30.0 * self.problem_size / self.lambda_ + ) + ) + + cbest = len(self.best) > self.stagnation_iter + cmed = len(self.median) > self.stagnation_iter + cbest2 = numpy.median(self.best[-20:]) >= numpy.median( + self.best[-self.stagnation_iter:-self.stagnation_iter + 20] + ) + cmed2 = numpy.median(self.median[-20:]) >= numpy.median( + self.median[-self.stagnation_iter:-self.stagnation_iter + 20] + ) + if cbest and cmed and cbest2 and cmed2: + self.criteria_met = True + + +class TolHistFun(bluepyopt.stoppingCriteria.StoppingCriteria): + """TolHistFun stopping criteria class""" + + name = "TolHistFun" + + def __init__(self, lambda_, problem_size): + """Constructor""" + super(TolHistFun, self).__init__() + self.tolhistfun = 10 ** -12 + self.mins = deque( + maxlen=10 + int(numpy.ceil(30.0 * problem_size / lambda_)) + ) + + def check(self, kwargs): + """Check if the range of the best values is smaller than + the threshold""" + population = kwargs.get("population") + self.mins.append(numpy.min([ind.fitness.reduce for ind in population])) + + if ( + len(self.mins) == self.mins.maxlen + and max(self.mins) - min(self.mins) < self.tolhistfun + 
): + self.criteria_met = True + + +class EqualFunVals(bluepyopt.stoppingCriteria.StoppingCriteria): + """EqualFunVals stopping criteria class""" + + name = "EqualFunVals" + + def __init__(self, lambda_, problem_size): + """Constructor""" + super(EqualFunVals, self).__init__() + self.problem_size = problem_size + self.equalvals = float(problem_size) / 3.0 + self.equalvals_k = int(numpy.ceil(0.1 + lambda_ / 4.0)) + self.equalvalues = [] + + def check(self, kwargs): + """Check if in 1/3rd of the last problem_size iterations the best and + k'th best solutions are equal""" + ngen = kwargs.get("gen") + population = kwargs.get("population") + + fitness = [ind.fitness.reduce for ind in population] + fitness.sort() + + if isclose(fitness[0], fitness[-self.equalvals_k], rel_tol=1e-6): + self.equalvalues.append(1) + else: + self.equalvalues.append(0) + + if ( + ngen > self.problem_size + and sum(self.equalvalues[-self.problem_size:]) > self.equalvals + ): + self.criteria_met = True + + +class TolX(bluepyopt.stoppingCriteria.StoppingCriteria): + """TolX stopping criteria class""" + + name = "TolX" + + def __init__(self): + """Constructor""" + super(TolX, self).__init__() + self.tolx = 10 ** -12 + + def check(self, kwargs): + """Check if all components of pc and sqrt(diag(C)) are smaller than + a threshold""" + pc = kwargs.get("pc") + C = kwargs.get("C") + + if all(pc < self.tolx) and all(numpy.sqrt(numpy.diag(C)) < self.tolx): + self.criteria_met = True + + +class TolUpSigma(bluepyopt.stoppingCriteria.StoppingCriteria): + """TolUpSigma stopping criteria class""" + + name = "TolUpSigma" + + def __init__(self, sigma0): + """Constructor""" + super(TolUpSigma, self).__init__() + self.sigma0 = sigma0 + self.tolupsigma = 10 ** 20 + + def check(self, kwargs): + """Check if the sigma/sigma0 ratio is bigger than a threshold""" + sigma = kwargs.get("sigma") + diagD = kwargs.get("diagD") + + if sigma / self.sigma0 > float(diagD[-1] ** 2) * self.tolupsigma: + self.criteria_met = True + + +class ConditionCov(bluepyopt.stoppingCriteria.StoppingCriteria): + """ConditionCov stopping criteria class""" + + name = "ConditionCov" + + def __init__(self): + """Constructor""" + super(ConditionCov, self).__init__() + self.conditioncov = 10 ** 14 + + def check(self, kwargs): + """Check if the condition number of the covariance matrix is + too large""" + cond = kwargs.get("cond") + + if cond > self.conditioncov: + self.criteria_met = True + + +class NoEffectAxis(bluepyopt.stoppingCriteria.StoppingCriteria): + """NoEffectAxis stopping criteria class""" + + name = "NoEffectAxis" + + def __init__(self, problem_size): + """Constructor""" + super(NoEffectAxis, self).__init__() + self.conditioncov = 10 ** 14 + self.problem_size = problem_size + + def check(self, kwargs): + """Check if the coordinate axis std is too low""" + ngen = kwargs.get("gen") + centroid = kwargs.get("centroid") + sigma = kwargs.get("sigma") + diagD = kwargs.get("diagD") + B = kwargs.get("B") + + noeffectaxis_index = ngen % self.problem_size + + if all( + centroid + == centroid + + 0.1 * sigma * diagD[-noeffectaxis_index] * B[-noeffectaxis_index] + ): + self.criteria_met = True + + +class NoEffectCoor(bluepyopt.stoppingCriteria.StoppingCriteria): + """NoEffectCoor stopping criteria class""" + + name = "NoEffectCoor" + + def __init__(self): + """Constructor""" + super(NoEffectCoor, self).__init__() + + def check(self, kwargs): + """Check if main axis std has no effect""" + centroid = kwargs.get("centroid") + sigma = kwargs.get("sigma") + C = kwargs.get("C") + + 
if any(centroid == centroid + 0.2 * sigma * numpy.diag(C)): + self.criteria_met = True diff --git a/bluepyopt/deapext/tools/selIBEA.py b/bluepyopt/deapext/tools/selIBEA.py index 53133bea..2cdf3668 100644 --- a/bluepyopt/deapext/tools/selIBEA.py +++ b/bluepyopt/deapext/tools/selIBEA.py @@ -5,7 +5,7 @@ from past.builtins import xrange # pylint: disable=W0622 """ -Copyright (c) 2016-2020, EPFL/Blue Brain Project +Copyright (c) 2016-2022, EPFL/Blue Brain Project This file is part of BluePyOpt diff --git a/bluepyopt/deapext/utils.py b/bluepyopt/deapext/utils.py new file mode 100644 index 00000000..b071b45d --- /dev/null +++ b/bluepyopt/deapext/utils.py @@ -0,0 +1,146 @@ +"""Utils function""" + +""" +Copyright (c) 2016-2022, EPFL/Blue Brain Project + + This file is part of BluePyOpt + + This library is free software; you can redistribute it and/or modify it under + the terms of the GNU Lesser General Public License version 3.0 as published + by the Free Software Foundation. + + This library is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more + details. + + You should have received a copy of the GNU Lesser General Public License + along with this library; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +""" + +import numpy +import random + +import deap.base + +# pylint: disable=R0914, R0912 + + +class WeightedReducedFitness(deap.base.Fitness): + + """Fitness that compares by weighted objective values""" + + def __init__(self, values=(), obj_size=None, reduce_fcn=numpy.sum): + self.weights = [-1.0] * obj_size if obj_size is not None else [-1] + self.reduce_fcn = reduce_fcn + + super(WeightedReducedFitness, self).__init__(values) + + @property + def reduce(self): + """Reduce of values""" + return self.reduce_fcn(self.values) + + @property + def weighted_reduce(self): + """Reduce of weighted values""" + return self.reduce_fcn(self.wvalues) + + def __le__(self, other): + return self.weighted_reduce <= other.weighted_reduce + + def __lt__(self, other): + return self.weighted_reduce < other.weighted_reduce + + def __deepcopy__(self, _): + """Override deepcopy""" + + cls = self.__class__ + result = cls.__new__(cls) + result.__dict__.update(self.__dict__) + return result + + +class WSListIndividual(list): + + """Individual consisting of a list with weighted fitness""" + + def __init__(self, *args, **kwargs): + """Constructor""" + + reduce_fcn = kwargs.get("reduce_fcn", numpy.sum) + self.fitness = WeightedReducedFitness( + obj_size=kwargs["obj_size"], reduce_fcn=reduce_fcn + ) + + # Index of the parent, used by MO-CMA + self._ps = "p", 0 + + del kwargs["obj_size"] + if "reduce_fcn" in kwargs: + del kwargs["reduce_fcn"] + super(WSListIndividual, self).__init__(*args, **kwargs) + + +def update_history_and_hof(halloffame, history, population): + """Update the hall of fame with the generated individuals + Note: History and Hall-of-Fame behave like dictionaries + """ + if halloffame is not None: + halloffame.update(population) + + history.update(population) + + +def record_stats(stats, logbook, gen, population, invalid_count): + """Update the statistics with the new population""" + record = stats.compile(population) if stats is not None else {} + logbook.record(gen=gen, nevals=invalid_count, **record) + + +def closest_feasible(individual, lbounds, ubounds): + """Returns the 
closest individual in the parameter bounds""" + # TODO: Fix 1e-9 hack + for i, (u, l, el) in enumerate(zip(ubounds, lbounds, individual)): + if el >= u: + individual[i] = u - 1e-9 + elif el <= l: + individual[i] = l + 1e-9 + return individual + + +def bound(population, lbounds, ubounds): + """Bounds the population based on lower and upper parameter bounds.""" + n_out = 0 + for i, ind in enumerate(population): + if numpy.any(numpy.less(ind, lbounds)) or numpy.any( + numpy.greater(ind, ubounds) + ): + population[i] = closest_feasible(ind, lbounds, ubounds) + n_out += 1 + return n_out + + +def uniform(lower_list, upper_list, dimensions): + """Uniformly pick an individual""" + if hasattr(lower_list, "__iter__"): + return [ + random.uniform(lower, upper) for lower, upper in + zip(lower_list, upper_list) + ] + else: + return [random.uniform(lower_list, upper_list) for _ in + range(dimensions)] + + +def reduce_method(meth): + """Overwrite reduce""" + return (getattr, (meth.__self__, meth.__func__.__name__)) + + +def run_next_gen(criteria, terminator): + """Condition to stay inside the loop.""" + if terminator is None: + return criteria + return criteria and not terminator.is_set() diff --git a/bluepyopt/tests/test_deapext/test_optimisationsCMA.py b/bluepyopt/tests/test_deapext/test_optimisationsCMA.py new file mode 100644 index 00000000..c14cecd1 --- /dev/null +++ b/bluepyopt/tests/test_deapext/test_optimisationsCMA.py @@ -0,0 +1,60 @@ +"""bluepyopt.optimisationsCMA tests""" + +import pytest +import bluepyopt +import bluepyopt.ephys.examples.simplecell + + +@pytest.mark.unit +def test_optimisationsCMA_normspace(): + """deapext.optimisationsCMA: Testing optimisationsCMA normspace""" + + simplecell = bluepyopt.ephys.examples.simplecell.SimpleCell() + evaluator = simplecell.cell_evaluator + optimisation = bluepyopt.deapext.optimisationsCMA.DEAPOptimisationCMA( + evaluator=evaluator) + + x = [n * 0.1 for n in range(len(evaluator.params))] + y = [f2(f1(p)) for p, f1, f2 in zip(x, optimisation.to_norm, + optimisation.to_space)] + + for a, b in zip(x, y): + assert abs(a - b) < 1e-5 + + +@pytest.mark.unit +def test_optimisationsCMA_SO_run(): + """deapext.optimisationsCMA: Testing optimisationsCMA run from centroid""" + + simplecell = bluepyopt.ephys.examples.simplecell.SimpleCell() + evaluator = simplecell.cell_evaluator + x = [n * 0.1 for n in range(len(evaluator.params))] + + optimiser = bluepyopt.deapext.optimisationsCMA.DEAPOptimisationCMA + optimisation = optimiser(evaluator=evaluator, centroids=[x]) + pop, hof, log, hist = optimisation.run(max_ngen=2) + + assert abs(log.select("avg")[-1] - 53.3333) < 1e-4 + assert abs(log.select("std")[-1] - 83.7987) < 1e-4 + assert pop[0] == [0.10525059698894745, 0.01000000003249999] + + +@pytest.mark.unit +def test_optimisationsCMA_MO_run(): + """deapext.optimisationsCMA: Testing optimisationsCMA run from centroid""" + + simplecell = bluepyopt.ephys.examples.simplecell.SimpleCell() + evaluator = simplecell.cell_evaluator + + optimiser = bluepyopt.deapext.optimisationsCMA.DEAPOptimisationCMA + optimisation = optimiser( + selector_name="multi_objective", + offspring_size=3, + evaluator=evaluator, + seed=42 + ) + pop, hof, log, hist = optimisation.run(max_ngen=2) + + assert abs(log.select("avg")[-1] - 120.) 
< 1e-4 + assert abs(log.select("std")[-1] - 74.8331) < 1e-4 + assert pop[0] == [0.07506300058169628, 0.01000000003249999] diff --git a/bluepyopt/tests/test_deapext/test_utils.py b/bluepyopt/tests/test_deapext/test_utils.py new file mode 100644 index 00000000..7bf098a8 --- /dev/null +++ b/bluepyopt/tests/test_deapext/test_utils.py @@ -0,0 +1,40 @@ +"""bluepyopt.utils tests""" + +import multiprocessing +import time + +import bluepyopt.deapext.utils as utils +import pytest + + +def flag(event): + """Send a multiprocessing event.""" + time.sleep(1) + event.set() + + +def catch_event(event): + """Verify that run_next_gen changes when event is caught.""" + # None case + assert utils.run_next_gen(True, None) + + # event is not set case + assert utils.run_next_gen(True, event) + + # event is set by another process case + time.sleep(2) + assert not(utils.run_next_gen(True, event)) + + +@pytest.mark.unit +def test_run_next_gen_condition(): + """deapext.utils: Testing run_next_gen.""" + event = multiprocessing.Event() + p1 = multiprocessing.Process(target=catch_event, args=(event,)) + p2 = multiprocessing.Process(target=flag, args=(event,)) + + p1.start() + p2.start() + + p1.join() + p2.join() diff --git a/bluepyopt/tests/test_ephys/test_locations.py b/bluepyopt/tests/test_ephys/test_locations.py index 2b4f51b8..24d93e53 100644 --- a/bluepyopt/tests/test_ephys/test_locations.py +++ b/bluepyopt/tests/test_ephys/test_locations.py @@ -1,7 +1,7 @@ """bluepyopt.ephys.simulators tests""" """ -Copyright (c) 2016-2020, EPFL/Blue Brain Project +Copyright (c) 2016-2022, EPFL/Blue Brain Project This file is part of BluePyOpt diff --git a/examples/cma_strategy/cma.ipynb b/examples/cma_strategy/cma.ipynb new file mode 100644 index 00000000..dea21864 --- /dev/null +++ b/examples/cma_strategy/cma.ipynb @@ -0,0 +1,366 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Optimisation using the CMA evolutionary strategy\n", + "\n", + "This notebook will explain how to optimise a model using the covariance matrix adaptation (CMA) optimisation strategy. \n", + "BluePyOpt includes two flavors of CMA: a single objective one and a hybrid single/multi objective one.\n", + "\n", + "For a tutorial on the theory and algorithm behind CMA, please refer to https://arxiv.org/abs/1604.00772.\n", + "\n", + "This notebook uses the simple cell model defined in examples/simplecell. Please refer to this notebook for a first introduction to model fitting." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: matplotlib in /gpfs/bbp.cscs.ch/ssd/apps/tools/jupyter/venvs/python37/lib/python3.7/site-packages (3.4.3)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /gpfs/bbp.cscs.ch/ssd/apps/tools/jupyter/venvs/python37/lib/python3.7/site-packages (from matplotlib) (2.8.2)\n", + "Requirement already satisfied: cycler>=0.10 in /gpfs/bbp.cscs.ch/ssd/apps/tools/jupyter/venvs/python37/lib/python3.7/site-packages (from matplotlib) (0.11.0)\n", + "Requirement already satisfied: numpy>=1.16 in /gpfs/bbp.cscs.ch/ssd/apps/tools/jupyter/venvs/python37/lib/python3.7/site-packages (from matplotlib) (1.21.4)\n", + "Requirement already satisfied: pillow>=6.2.0 in /gpfs/bbp.cscs.ch/ssd/apps/tools/jupyter/venvs/python37/lib/python3.7/site-packages (from matplotlib) (8.4.0)\n", + "Requirement already satisfied: pyparsing>=2.2.1 in /gpfs/bbp.cscs.ch/ssd/apps/tools/jupyter/venvs/python37/lib/python3.7/site-packages (from matplotlib) (2.4.7)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /gpfs/bbp.cscs.ch/ssd/apps/tools/jupyter/venvs/python37/lib/python3.7/site-packages (from matplotlib) (1.3.2)\n", + "Requirement already satisfied: six>=1.5 in /gpfs/bbp.cscs.ch/ssd/apps/tools/jupyter/venvs/python37/lib/python3.7/site-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)\n", + "\u001b[33mWARNING: You are using pip version 20.1.1; however, version 22.0.3 is available.\n", + "You should consider upgrading via the '/gpfs/bbp.cscs.ch/ssd/apps/tools/jupyter/venvs/python37/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n" + ] + } + ], + "source": [ + "# Install matplotlib if needed\n", + "!pip install matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib inline\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy\n", + "\n", + "%load_ext autoreload\n", + "%autoreload" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def plot_fitness(logbook):\n", + " gen_numbers = logbook.select('gen')\n", + " min_fitness = logbook.select('min')\n", + " max_fitness = logbook.select('max')\n", + " plt.plot(gen_numbers, min_fitness, label='min fitness')\n", + " plt.xlabel('generation #')\n", + " plt.ylabel('score (# std)')\n", + " plt.legend()\n", + " plt.xlim(min(gen_numbers) - 1, max(gen_numbers) + 1) \n", + " plt.ylim(0.9*min(min_fitness), 1.1 * max(min_fitness)) " + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def plot_responses(responses):\n", + " plt.subplot(2,1,1)\n", + " plt.plot(responses['step1.soma.v']['time'], responses['step1.soma.v']['voltage'], label='step1')\n", + " plt.legend()\n", + " plt.subplot(2,1,2)\n", + " plt.plot(responses['step2.soma.v']['time'], responses['step2.soma.v']['voltage'], label='step2')\n", + " plt.legend()\n", + " plt.tight_layout()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Setting up the cell template and evaluator\n", + "-------------------------" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, we instantiate the cell template and evaluator as defined in the simplecell example:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + 
"import bluepyopt.ephys.examples.simplecell\n", + "\n", + "simple_cell = bluepyopt.ephys.examples.simplecell.SimpleCell()\n", + "evaluator = simple_cell.cell_evaluator" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Optimisation using single objective CMA (SO-CMA)\n", + "-------------------------" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First we will present the single objective version of the CMA strategy.\n", + "\n", + "In this version of CMA, the optimizer aims at minimizing a single fitness value computed as the sum of the scores of the objectives.\n", + "\n", + "Note that in CMA, informing the offspring_size is optional as by default, it is automatically set to\n", + "int(4 + 3 * log(dimension_parameter_space))." + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [], + "source": [ + "optimiser = bluepyopt.deapext.optimisationsCMA.DEAPOptimisationCMA\n", + "optimisation = optimiser(evaluator=evaluator, seed=1)\n", + "pop, hof, log, hist = optimisation.run(max_ngen=10)" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Best individual: [0.11513442416272959, 0.038816802452611765]\n", + "Fitness values: (0.0,)\n" + ] + } + ], + "source": [ + "best_ind = hof[0]\n", + "print('Best individual: ', best_ind)\n", + "print('Fitness values: ', best_ind.fitness.values)" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYEAAAEGCAYAAACD7ClEAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/YYfK9AAAACXBIWXMAAAsTAAALEwEAmpwYAAAhfUlEQVR4nO3de5RU5Z3u8e+vbzSXpguhRaQqA0RAUKhSwSSS8SQS8YbIJI7HRBOJRk/OmWgcJ0ZnMrMyzpyVmJWTmDEzahg14klM4i1KoscLRkeZZFTQbkBQUdKmG7k0SDf3S3f/zh+1G1vsSzXUrl2X57NWra7LrqrHXsLD3u/e72vujoiIlKayqAOIiEh0VAIiIiVMJSAiUsJUAiIiJUwlICJSwiqiDpCJUaNG+bhx46KOISJSUJYvX77F3ev62qYgSmDcuHEsW7Ys6hgiIgXFzN7pbxsdDhIRKWEqARGREqYSEBEpYQUxJiAihefAgQM0Nzezd+/eqKMUverqauLxOJWVlQN+r0pARELR3NxMTU0N48aNw8yijlO03J2tW7fS3NzM+PHjB/x+HQ4SkVDs3buXkSNHqgBCZmaMHDnysPe4VAIiEhoVQG4cye9ZJSAiUsJUAiJS0hYvXszNN988oPfceuutTJkyhUsuueQD73/kkUdYvXp1GDFDo4FhESlp8+bNY968eQN6z2233caSJUuIx+MHPwPSJTB37lymTp2a9Zxh0Z6AiBSlxsZGjj/+eBYsWMCkSZO45JJLWLJkCbNmzWLixIm89NJLANxzzz187WtfA2DBggVcc801nHbaaUyYMIEHH3zwQ5/71a9+lXXr1nHOOedwyy23HHz/73//exYvXsz1119PKpXi7bff5lOf+hQ33HADp556KpMmTeKFF14AoKOjg+uvv56ZM2cyffp0fvKTnwCwYcMGTj/9dFKpFCeeeCIvvPACHR0dLFiwgBNPPJFp06Zxyy23ZPX3pD0BEQndTb95jdXvbs/qZ049djjfPv+EPrd56623eOCBB7j77ruZOXMm9913H0uXLmXx4sV85zvf4ZFHHvnQezZs2MDSpUt5/fXXmTdvHhdeeOEHXr/jjjt44oknePbZZxk1ahT33HMPAKeddhrz5s1j7ty5H3hPe3s7L730Eo8//jg33XQTS5Ys4a677qK2tpaXX36Zffv2MWvWLObMmcPDDz/MWWedxbe+9S06OjrYvXs39fX1rF+/nlWrVgHQ2tp6RL+3Q6kERKRojR8/nmnTpgFwwgknMHv2bMyMadOm0djY2ON75s+fT1lZGVOnTmXTpk1HnOGzn/0sAKeccsrB73zqqadYsWLFwT2NtrY21q5dy8yZM7n88ss5cOAA8+fPJ5VKMWHCBNatW8fVV1/Neeedx5w5c444U3cqAREJXX//Yg/LoEGDDt4vKys7+LisrIz29vZ+3+PuWctQXl5+8DvdnR//+MecddZZH9r++eef57HHHmPBggVcd911fOlLX6KhoYEnn3ySO+64g/vvv5+77777iHN1UQmIiGRJTU0NO3bs6He7s846i9tvv50zzjiDyspK3nzzTcaOHcuWLVuIx+NceeWV7Nu3j1deeYVzzz2XqqoqPve5zzF58mQuvfTSrGYOtQTMLAbcCZwIOHA58AbwK2Ac0Ahc5O7bwswhIpILF198MVdeeSW33nprj4PKXb7yla/Q2NjIySefjLtTV1fHI488wnPPPcf3v/99KisrGTZsGPfeey/r16/ny1/+Mp2dnQB897vfzWpmy8buTq8fbrYIeMHd7zSzKmAI8HfAe+5+s5ndCIxw9xv6+pwZM2a4FpURKSxr1qxhypQpUccoGT39vs1subvP6Ot9oZ0iama1wOnAXQDuvt/dW4ELgEXBZouA+WFlyJWtO/dx40Mr+MPbW6OOIiIyIGFeJzAe
aAF+amavmtmdZjYUGO3uG4JtNgKje3qzmV1lZsvMbFlLS0uIMY/c0EEVPLC8maVv5XdOEZFDhVkCFcDJwO3ufhKwC7ix+waePhbV4/Eod1/o7jPcfUZdXZ/rJEeuurKc44+poaGpLeooInklzMPN8r4j+T2HWQLNQLO7vxg8fpB0KWwyszEAwc/NIWbImWQiRkNzK52d+p9eBNILnWzdulVFELKu9QSqq6sP6/2hnR3k7hvNrMnMJrv7G8BsYHVwuwy4Ofj5aFgZcikVj3Hfi39i3ZZdHHf0sKjjiEQuHo/T3NxMvh/OLQZdK4sdjrCvE7ga+HlwZtA64Muk9z7uN7MrgHeAi0LOkBOpj8QAaGhqVQmIAJWVlYe10pXkVqgl4O71QE+nJ80O83uj8NG6YQytKqehuZXPnXJ4jSwikmuaRTRLysuMafFaGppao44iIpIxlUAWJRMxVm/Yzr72jqijiIhkRCWQRal4jAMdnvUpc0VEwqISyKLug8MiIoVAJZBFxwyv5uiaQTQ066IxESkMKoEsMrP0RWPaExCRAqESyLJUIsa6Lbto230g6igiIv1SCWRZMh4DoKG5NdIcIiKZUAlk2fRELaDBYREpDCqBLBteXclH64ZqT0BECoJKIATJRIz6pjbNnigieU8lEIJUIsaWnft4t21v1FFERPqkEghB1+Bw/Z9aI80hItIflUAIpowZTlV5mcYFRCTvqQRCUFVRxtRjh1OvM4REJM+pBEKSSsRY2dxGe0dn1FFERHqlEghJMlHLngMdvNWyM+ooIiK9UgmERIPDIlIIVAIhGT9qKMOrKzQ4LCJ5TSUQkq4ZReubNK20iOQvlUCIUokYb27awe797VFHERHpkUogRMl4jI5O5zUtNykieUolEKJkIgZocFhE8pdKIER1NYMYGxtMvQaHRSRPVYT54WbWCOwAOoB2d59hZkcBvwLGAY3ARe6+LcwcUUppuUkRyWO52BP4tLun3H1G8PhG4Bl3nwg8EzwuWslELc3b9rBl576oo4iIfEgUh4MuABYF9xcB8yPIkDNdF42t0CEhEclDYZeAA0+Z2XIzuyp4brS7bwjubwRG9/RGM7vKzJaZ2bKWlpaQY4ZnWryWMtPgsIjkp1DHBIBPuvt6MzsaeNrMXu/+oru7mfW4/Ja7LwQWAsyYMaNgl+gaUlXBpNE11DfrojERyT+h7gm4+/rg52bg18CpwCYzGwMQ/NwcZoZ80DU4rOUmRSTfhFYCZjbUzGq67gNzgFXAYuCyYLPLgEfDypAvkokYbXsO8M7W3VFHERH5gDAPB40Gfm1mXd9zn7s/YWYvA/eb2RXAO8BFIWbIC12Dww3NrYwbNTTaMCIi3YRWAu6+Dkj28PxWYHZY35uPJo0exuDKcuqbWrkgNTbqOCIiB+mK4RyoKC9j2thaLTcpInlHJZAjyUQtr727nf3tWm5SRPKHSiBHkokY+9s7eWPjjqijiIgcpBLIkYPLTerKYRHJIyqBHImPGMyoYVWaTE5E8opKIEfMjGQ8psFhEckrKoEcSiZivN2yk+17D0QdRUQEUAnkVDIRwx1WaR4hEckTKoEcSsZrAQ0Oi0j+UAnkUGxIFeNHDdXgsIjkDZVAjiXjunJYRPKHSiDHkokYm7bvY2Pb3qijiIioBHItmYgBaG9ARPKCSiDHpo4ZTmW50aDBYRHJAyqBHKuuLGfKmOEaHBaRvKASiEAyHmNFcxsdnVpuUkSipRKIQDIRY+e+dta17Iw6ioiUOJVABFKJ4KIxHRISkYipBCIwYdQwagZVaHBYRCKnEohAWZkxPVFLQ5PmEBKRaPVbAmYWN7NvmNmjZvaymT1vZreZ2XlmphI5TMl4jDUbtrP3QEfUUUSkhPX5l7iZ/RS4G9gPfA/4PPC/gCXA2cBSMzs97JDFKJmI0d7pvPbu9qijiEgJq+jn9R+4+6oenl8FPGxmVcBHsh+r+KWCK4cbmlo55c9GRBtGREpWnyXQSwF0f30/8FZWE5WI0cOrOWZ4tQaHRSRSfZaAma0Eer2iyd2n9/cFZlYOLAPWu/tcMxsP/BIYCSwHvhiUSclJJWK6clhEItXfwO5c4HzgieB2SXB7PLhl4uvAmm6Pvwfc4u7HAduAKwYSuJgkEzEat+5m266S7EARyQN9loC7v+Pu7wBnuvs33X1lcLsRmNPfh5tZHDgPuDN4bMAZwIPBJouA+UeQv6Alg4vGdEhIRKKS6SmeZmazuj04LcP3/gj4JtAZPB4JtLp7e/C4GRjbyxdeZWbLzGxZS0tLhjELy7SxtZih6wVEJDKZlsAVwG1m1mhmjcBtwOV9vcHM5gKb3X354QRz94XuPsPdZ9TV1R3OR+S9mupKjqsbpj0BEYlMf6eIdnnP3ZNmVgvg7m3BAG9fZgHzzOxcoBoYDvwLEDOzimBvIA6sP8zsRSGViPG71zfj7qSPlomI5E6mewIPQfovf3fvOnbxYB/b4+5/6+5xdx8HXAz8zt0vAZ4FLgw2uwx4dMCpi0gyEWPrrv00b9sTdRQRKUH9nSJ6PHACUGtmn+320nDS/7o/HDcAvzSz/w28Ctx1mJ9TFFLdlptMHDUk2jAiUnL6Oxw0mfRpojHSp4p22QFcmemXuPtzwHPB/XXAqQPIWNQmH1NDVUUZDU2tnJ88Nuo4IlJi+rti+FHgUTP7hLv/IUeZSkpleRknHjtcg8MiEolMxwT+wsyGm1mlmT1jZi1mdmmoyUpIKjGClevbaO/o7H9jEZEsyrQE5rj7dtKHhhqB44DrwwpVapKJWvYe6OSNTTuijiIiJSbTEqgMfp4HPNDtDCHJgvdnFNWvVURyK9MS+I2ZvQ6cAjxjZnXA3vBilZaPHDWE2JBKTSYnIjmXUQkEcwWdBsxw9wPAbuCCMIOVEjMjGY9pcFhEci7j5SHd/T137wju73L3jeHFKj2pRIw3N+1g1772/jcWEckSrRGcJ1KJGJ0OK9drXEBEckclkCemx4NppTUuICI5lFEJmNnZYQcpdSOHDSJx1GCNC4hITvVZAmb2sWB5yO90e+7/hp6qRCXjMZ0mKiI51d+ewMXAfwATzOx7ZnYJcHL4sUpTKhFjfeseNu/Q2bcikhv9lcAN7v5J4E/Ab4ERwDFm9l9m9qvQ05UYXTQmIrnWXwk8YWZPA3XAKOD/Ae+4+8eBvwk7XKk54dhaystMg8MikjP9LTR/BjAP2AlMAP4ZOM7MHiF9qEiyaHBVOZNH12hwWERypt+zg9x9D9Dk7j9w9y8Ab5NeS+CPYYcrRclEjIamVjo7PeooIlICMp024oxuD//V3Vvc/aGQMpW0kxIxtu9tp3HrrqijiEgJGPDFYu5e0stBhi3ZbblJEZGw9XedwG/M7Hwzq+zhtQlm9k9mdnl48UrPcUcPY0hVuQaHRSQn+ltj+ErgOuBHZvYe0EJ6gflxpMcG/jVYglKypLzMmDa2lvpmnSYqIuHrb43hjcA3gW+a2ThgDLAHeNPdd4cfrzSlEjF++p+N7GvvYFBFedRxRKSIDWQq6UZ3/4O716sAwpVKxNjf0cn
rG7TcpIiES7OI5iENDotIrqgE8tCY2mrqagZpcFhEQpdxCZjZYDObPIDtq83sJTNrMLPXzOym4PnxZvaimb1lZr8ys6rDCV7MupabrNeVwyISskzXEzgfqAeeCB6nzGxxP2/bB5zh7kkgBZxtZh8Hvgfc4u7HAduAKw4venFLJWpZ17KLtj0Hoo4iIkUs0z2BfwROBVoB3L0eGN/XGzxtZ/CwMrg5cAbwYPD8ImD+APKWjFRiBAArdaqoiIQo0xI44O6H/m3U7+Q2ZlZuZvXAZuBp0tcWtLp712rqzcDYXt57lZktM7NlLS0tGcYsHtOC5Sbrm7ZFnEREilmmJfCamX0BKDeziWb2Y+D3/b3J3TvcPQXESe9JHJ9pMHdf6O4z3H1GXV1dpm8rGrWDK5lQN5R6rS0gIiHKtASuBk4gfZz/PqANuDbTL3H3VuBZ4BNAzMy6LlKLA+sz/ZxSk4rHqG9qxV0ziopIOPotgWCN4cfc/VvuPjO4/b2797kGopnVmVksuD8YOBNYQ7oMLgw2uwzQtBO9SCZibNm5jw1tWm5SRMKRyXoCHUCnmdUO8LPHAM+a2QrgZeBpd/8tcANwnZm9BYwENCtpL95fbrI10hwiUrz6m0Cuy05gZbDU5MGJ7t39mt7e4O4rgJN6eH4d6fEB6cfxY2qoKi+jvqmVc6aNiTqOiBShTEvg4eAmOTSoopwpxw7X9BEiEpqMSsDdFwVX9k4KnnrD3XUVUw6k4rU8sLyZjk6nvMyijiMiRSbTK4Y/BawF/g24DXjTzE4PL5Z0SSZi7N7fwVubd/a/sYjIAGV6OOgHwBx3fwPAzCYBvwBOCSuYpHUfHJ58TE20YUSk6GR6nUBlVwEAuPubpKeBkJCNGzmU4dUVvKpxAREJQaZ7AsvM7E7gZ8HjS4Bl4USS7srKjGQiptNERSQUme4J/E9gNXBNcFsdPCc5kIzHeGPTDvbs74g6iogUmUz3BCqAf3H3H8LBq4gHhZZKPiCZiNHR6bz2bhszxh0VdRwRKSKZ7gk8Awzu9ngwsCT7caQnyUTXjKKt0QYRkaKTaQlUd1sbgOD+kHAiyaGOrqlmbGywSkBEsi7TEthlZid3PTCzU4A94USSniQTtTRouUkRybJMxwSuBR4ws3cBA44B/ntYoeTDkvEYj6/cyNad+xg5TMMxIpIdmU4b8bKZHQ90LTSvaSNyLBlcNLaiuY1PH390tGFEpGhkOm3EX5IeF1hFek3gX3U/PCThmza2ljLT4LCIZFemYwL/4O47zOyTwGzSawDcHl4sOdTQQRVMGl2jEhCRrMq0BLquUjoP+Hd3fwyoCieS9CYZj9HQrOUmRSR7Mi2B9Wb2E9KDwY+b2aABvFeyJJmI0br7AH96b3fUUUSkSGT6F/lFwJPAWcGi8UcB14cVSnqmi8ZEJNsyKgF33+3uD7v72uDxBnd/KtxocqjJo2uoriyjoakt6igiUiR0SKeAVJSXMW1sLfVN26KOIiJFQiVQYJLxGKve3c6Bjs6oo4hIEVAJFJhkIsb+9k7e2Lgj6igiUgRUAgWma7lJDQ6LSDaoBApMfMRgRg6t0kpjIpIVoZWAmSXM7FkzW21mr5nZ14PnjzKzp81sbfBzRFgZipFZsNykZhQVkSwIc0+gHfgbd58KfBz4KzObCtwIPOPuE0kvVnNjiBmKUjIeY+3mnezYqzn8ROTIhFYCwbUErwT3dwBrgLHABcCiYLNFpCekkwFIJmpxh5Xrdb2AiByZnIwJmNk44CTgRWC0u28IXtoIjO7lPVeZ2TIzW9bS0pKLmAWja3BYF42JyJEKvQTMbBjwEHCtu2/v/pqnZ0LrcTY0d1/o7jPcfUZdXV3YMQtKbEgV40YO0eCwiByxUEvAzCpJF8DP3f3h4OlNZjYmeH0MsDnMDMVKg8Mikg1hnh1kpNcdWOPuP+z20mLgsuD+ZcCjYWUoZsl4jA1te9m0fW/UUUSkgIW5JzAL+CJwhpnVB7dzgZuBM81sLfCZ4LEMUFIXjYlIFmS60PyAuftS0ovS92R2WN9bKk44djgVZUZDUytnnXBM1HFEpEDpiuECVV1ZzpQxwzUuICJHRCVQwJKJWlY0tdHZqeUmReTwqAQKWDIeY8e+dtZt2Rl1FBEpUCqBAvb+jKK6aExEDo9KoIB9tG4YwwZV6KIxETlsKoECVlZmTI/XanBYRA6bSqDAJRMx1mzYzt4DHVFHEZECpBIocMl4jAMdzuoN2/vfWETkECqBAvf+jKKtkeYQkcKkEihwx9RWc8zwapWAiBwWlUARSCZqaWjWaaIiMnAqgSKQTMT445ZdtO7eH3UUESkwKoEikIrHALQ3ICIDphIoAtPitZhpcFhEBk4lUARqqis5rm6YSkBEBkwlUCS6lptML9ssIpIZlUCRSCZibNm5n/Wte6KOIiIFRCVQJLoGh7XcpIgMhEqgSEw+poaqijKNC4jIgKgEikRVRRknHjucBq0tICIDoBIoIslEjJXr22jv6Iw6iogUCJVAEUklYuw50MHazVpuUkQyoxIoIkkNDovIAKkEisifjRxCbEilBodFJGOhlYCZ3W1mm81sVbfnjjKzp81sbfBzRFjfX4rMjGQ8pj0BEclYmHsC9wBnH/LcjcAz7j4ReCZ4LFmUTMR4c9MOdu9vjzqKiBSA0ErA3Z8H3jvk6QuARcH9RcD8sL6/VKUStXQ6rFqv5SZFpH+5HhMY7e4bgvsbgdG9bWhmV5nZMjNb1tLSkpt0RWD6wcHhbdEGEZGCENnAsKdnOut1tjN3X+juM9x9Rl1dXQ6TFbZRwwYRHzFYF42JSEZyXQKbzGwMQPBzc46/vySkEhocFpHM5LoEFgOXBfcvAx7N8feXhFQixvrWPbTs2Bd1FBHJc2GeIvoL4A/AZDNrNrMrgJuBM81sLfCZ4LFkWTIRA2BFc2ukOUQk/1WE9cHu/vleXpod1ndK2gnHDqe8zKhvamX2lF7H3kVEdMVwMRpSVcGk0TUaFxCRfqkEilQqEaOhSctNikjfVAJFKpWoZfvedhq37o46iojkMZVAkeoaHNZkciLSF5VAkZp4dA1Dqso1LiAifVIJFKnyMuPEsbUqARHpk0qgiJ2UiLH63e3sb9dykyLSM5VAEUsmYuzv6OT1jZpRVER6phIoYhocFpH+qASK2LG11YwaNohXVQIi0guVQBEzM1KJWu0JiEivVAJFLpWI8XbLLrbvPRB1FBHJQyqBItc1LrCyWYvMiMiHqQSK3PSxMQBdLyAiPVIJFLnaIZVMGDVUJSAiPVIJlIBksNykZhQVkUOpBEpAKhGjZcc+Nm7fG3UUEckzKoESoIvGRKQ3KoESMGVMDZXlRn2TzhASkQ9SCZSAQRXlTB0znPqmbVFHEZE8oxIoEclEjJXNbXR0anBYRN6nEigRqUSMXfs7eLtlZ9RRRCSPqARKRNfgsK4XEJHuVAIlYvzIodRUV+gMIRH5gEhKwMzONrM3zOwtM7sxigylpqzMSMZj2hMQkQ/IeQmYWTnwb8A5wFTg82Y2Ndc5SlEyUcvrG3ew90BH1F
FEJE9URPCdpwJvufs6ADP7JXABsDqCLCUllRhBR6dz0j89TZlFnUakcH3lzyfw12dOijpGVkRRAmOBpm6Pm4GPHbqRmV0FXBU83Gdmq3KQ7UiNArZEHSIDhZCzEDKCcmZbQeS8DkZdVwA5gcn9bRBFCWTE3RcCCwHMbJm7z4g4Ur+UM3sKISMoZ7YpZ3aZ2bL+toliYHg9kOj2OB48JyIiORZFCbwMTDSz8WZWBVwMLI4gh4hIycv54SB3bzezrwFPAuXA3e7+Wj9vWxh+sqxQzuwphIygnNmmnNnVb07TQiMiIqVLVwyLiJQwlYCISAnL6xIolOklzOxuM9ucz9cymFnCzJ41s9Vm9pqZfT3qTD0xs2oze8nMGoKcN0WdqS9mVm5mr5rZb6PO0hszazSzlWZWn8kpg1Ews5iZPWhmr5vZGjP7RNSZDmVmk4PfYddtu5ldG3WunpjZXwd/flaZ2S/MrLrXbfN1TCCYXuJN4EzSF5S9DHze3fPuymIzOx3YCdzr7idGnacnZjYGGOPur5hZDbAcmJ9vv08zM2Cou+80s0pgKfB1d/+viKP1yMyuA2YAw919btR5emJmjcAMd8/bi5vMbBHwgrvfGZw1OMTdWyOO1avg76f1wMfc/Z2o83RnZmNJ/7mZ6u57zOx+4HF3v6en7fN5T+Dg9BLuvh/oml4i77j788B7Uefoi7tvcPdXgvs7gDWkr97OK57WtehBZXDLy3+pmFkcOA+4M+oshczMaoHTgbsA3H1/PhdAYDbwdr4VQDcVwGAzqwCGAO/2tmE+l0BP00vk3V9ahcjMxgEnAS9GHKVHwSGWemAz8LS752VO4EfAN4HOiHP0x4GnzGx5MB1LvhkPtAA/DQ6t3WlmQ6MO1Y+LgV9EHaIn7r4e+D/An4ANQJu7P9Xb9vlcAhICMxsGPARc6+7bo87TE3fvcPcU6avJTzWzvDvEZmZzgc3uvjzqLBn4pLufTHrm3r8KDl/mkwrgZOB2dz8J2AXk8xhgFTAPeCDqLD0xsxGkj5qMB44FhprZpb1tn88loOklsiw4xv4Q8HN3fzjqPP0JDgk8C5wdcZSezALmBcfbfwmcYWY/izZSz4J/GeLum4Ffkz7Umk+ageZue3wPki6FfHUO8Iq7b4o6SC8+A/zR3Vvc/QDwMHBabxvncwloeoksCgZc7wLWuPsPo87TGzOrM7NYcH8w6RMDXo80VA/c/W/dPe7u40j/v/k7d+/1X1tRMbOhwYkABIdY5gB5dRabu28Emsysa8bL2eT31PKfJ08PBQX+BHzczIYEf+5nkx4D7FE+zyJ6ONNLRMLMfgF8ChhlZs3At939rmhTfcgs4IvAyuB4O8Dfufvj0UXq0RhgUXD2RRlwv7vn7emXBWA08Ov03wVUAPe5+xPRRurR1cDPg3/wrQO+HHGeHgVFeibwP6LO0ht3f9HMHgReAdqBV+lj+oi8PUVURETCl8+Hg0REJGQqARGREqYSEBEpYSoBEZESphIQESlhKgGRLDGza81sSLfHj3dd85Clzx9qZkuC+0uDeWFEjohKQCRDltbXn5lrSU/WBYC7n5vlidA+AfwhmBZgl7u3Z/GzpUSpBKSgmdk/BGtOLA3mTf9G8PxHzeyJYNK0F8zs+OD5e8zsVjP7vZmtM7MLu33W9Wb2spmt6FrHwMzGBZ9/L+krbRNmdruZLeu+3oGZXUN6npZnzezZ4LlGMxsV3L8umNt9Vdcc9MFnrzGzfw8+66ngKulD/xs/Glzg9zPgC6SnAU8Gc9ofHc5vVkqGu+umW0HegJlAPVAN1ABrgW8Erz0DTAzuf4z0tA4A95Ce+KsMmEp6unJIT6ewELDgtd+Snt54HOlZQj/e7XuPCn6WA88B04PHjcCobts1AqOAU4CVwFBgGPAa6Vlcx5G+ojMVbH8/cGkf/72PASOBbwPnRf371604bjqmKIVsFvCou+8F9prZb+DgTKmnAQ8E0yUADOr2vkfcvRNYbWajg+fmBLdXg8fDgImk52F5xz+4qM1FwZTMFaSnuZgKrOgj5yeBX7v7riDfw8Cfk54L64/uXh9st5x0MfTmaHffambTCebeFzlSKgEpRmVAq6eno+7Jvm73rdvP77r7T7pvGKy9sKvb4/HAN4CZ7r7NzO4hvSdyuLpn6QB6Ohx0B+kiiQeHhSYCvzWzRe5+yxF8t4jGBKSg/SdwvqXXJR4GzAXw9DoJfzSzv4SDA7rJfj7rSeDy4HMws7G9HG8fTroU2oK9iHO6vbaD9GGpQ70AzA9mdRwK/EXwXEbc/avATcA/A/OBx9w9pQKQbNCegBQsd3/ZzBaTPhSzifRx97bg5UuA283s70kvUflLoKGPz3rKzKaQPvsG0mtGX0r6X+fdt2sws1dJT2/dRLqIuiwEnjCzd939093e80qwx/BS8NSd7v5qsJeRqf8G3Ev6MNJ/DOB9In3SLKJS0MxsmKcXpR8CPA9c5cFayiLSP+0JSKFbaGZTSR+XX6QCEBkY7QmIiJQwDQyLiJQwlYCISAlTCYiIlDCVgIhICVMJiIiUsP8P9zqEyK0OL0oAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_fitness(log)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If one wishes to optimize starting form a known solution, or fine-tune a past mdodel, the argument `centroid` can be used to initialise the center of the CMA search. In this case, it might be useful to also specify `sigma`, which is the initial standard deviation of the distribution from which the models are drawn.\n", + "\n", + "For example here,we can restart from the final results of the previous optimisation:" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "metadata": {}, + "outputs": [], + "source": [ + "optimisation = optimiser(centroids=[list(hof[0])], sigma=0.01, evaluator=evaluator, seed=2)\n", + "pop, hof, log, hist = optimisation.run(max_ngen=10)" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Best individual: [0.11485248564327624, 0.039145905479049836]\n", + "Fitness values: (0.0,)\n" + ] + } + ], + "source": [ + "best_ind = hof[0]\n", + "print('Best individual: ', best_ind)\n", + "print('Fitness values: ', best_ind.fitness.values)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Optimisation using multi objective CMA (MO-CMA)\n", + "-------------------------" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Second is the hybrid single/multi objective CMA strategy. It is tasked with both:\n", + "- minimizing the fitness computed as the sum of the scores \n", + "- maximizimg the hyper-volume of the Pareto front formed by the current population of models.\n", + "\n", + "At each generation, all models in the population are ranked for both criteria, and a mixed rank is obtained following the formula:\n", + "\n", + "rank_mixed = w_hv * rank_hv. + ((1 - w_hv) * rank_fitness).\n", + "\n", + "Following this ranking, the best models are selected to update the CMA kernel for the next generation.\n", + "\n", + "By default, the weight assigned to the hyper-volume ranking (w_hv) is set to 0.5. The case w_hv=1 would lead to a pure multi-objective optimisation aiming at maximizing the hypervolume while w_hv would aim at minimizing the raw fitness (note that in the latter, the result would differ from using the SO-CMA as the MO-CMA uses a slightly different evolutionary logic)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": {}, + "outputs": [], + "source": [ + "optimisation = optimiser(weight_hv=0.5, offspring_size=3, selector_name=\"multi_objective\", evaluator=evaluator, seed=2)\n", + "pop, hof, log, hist = optimisation.run(max_ngen=10)" + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Best individual: [0.11309639259118459, 0.034266363909519156]\n", + "Fitness values: (0.0,)\n" + ] + } + ], + "source": [ + "best_ind = hof[0]\n", + "print('Best individual: ', best_ind)\n", + "print('Fitness values: ', best_ind.fitness.values)" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY4AAAEGCAYAAABy53LJAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/YYfK9AAAACXBIWXMAAAsTAAALEwEAmpwYAAAqNElEQVR4nO3de5xcdX3/8ddnN7vZSy5kZxfIdScoIPcIARXUItRwEQO11kKxGkHyoD8vpbZUrP3Vqr+H4s8qvyIVREkBq1RBxCjItVigXiBggHBRkMyQDYHsTu4z2fvn98ecs5kse5ndndkzl/fz8djHzpw558xnIs5nv7fP19wdERGRfNVEHYCIiJQXJQ4REZkQJQ4REZkQJQ4REZkQJQ4REZmQGVEHUEitra0ej8ejDkNEpGw8/vjjXe7eNpFrKipxxONx1q1bF3UYIiJlw8ySE71GXVUiIjIhShwiIjIhShwiIjIhFTXGISLlp6+vj46ODrq7u6MOpaI1NDSwaNEi6urqpnwvJQ4RiVRHRwezZ88mHo9jZlGHU5HcnVQqRUdHB0uXLp3y/dRVJSKR6u7uJhaLKWkUkZkRi8UK1qpT4hCRyClpFF8h/42VOEREZEKUOERE8rB27VquvPLKCV1z9dVXc8QRR3DhhRfud/0dd9zBs88+W4wwp4UGx0VE8rBy5UpWrlw5oWu++c1vcv/997No0aKhe0A2cZxzzjkceeSRBY9zOqjFISJVLZFI8KY3vYlVq1Zx2GGHceGFF3L//fdzyimncOihh/Loo48CcOONN/Lxj38cgFWrVvHJT36Sk08+mUMOOYTbbrvtdfe99NJLeemllzjrrLO46qqrhq7/5S9/ydq1a7n88stZtmwZf/jDHzj11FP59Kc/zUknncRhhx3Gww8/DMDAwACXX345J554Isceeyzf+ta3ANiyZQvvfOc7WbZsGUcffTQPP/wwAwMDrFq1iqOPPppjjjmGq666qmj/ZmpxiEjJ+PxPn+HZV3YV9J5HLpjD59571JjnvPjii9x6662sWbOGE088ke9///s88sgjrF27li996Uvccccdr7tmy5YtPPLIIzz//POsXLmS97///fu9ft1113H33Xfz4IMP0trayo033gjAySefzMqVKznnnHP2u6a/v59HH32Uu+66i89//vPcf//93HDDDcydO5fHHnuMnp4eTjnlFFasWMHtt9/OGWecwWc/+1kGBgbIZDKsX7+ezZs3s2HDBgB27NgxpX+3sShxiEjVW7p0KccccwwARx11FKeffjpmxjHHHEMikRjxmvPOO4+amhqOPPJIXnvttSnH8L73vQ+AE044Yeg97733Xp566qmhFs3OnTt54YUXOPHEE7nooovo6+vjvPPOY9myZRxyyCG89NJLfOITn+A973kPK1asmHJMo1HiEJGSMV7LoFhmzpw59LimpmboeU1NDf39/eNe4+4Fi6G2tnboPd2db3zjG5xxxhmvO/+hhx7izjvvZNWqVXzqU5/iQx/6EE8++ST33HMP1113HT/84Q9Zs2bNlOMaiRKHiMg0mz17Nrt37x73vDPOOINrr72W0047jbq6On7/+9+zcOFCurq6WLRoEZdccgk9PT088cQTnH322dTX1/Onf/qnHH744Xzwgx8sWvxFSxxmthi4GTgIcOB6d/9XM2sBfgDEgQTwAXffPsL1Hwb+MXj6f9z9pmLFKiIync4//3wuueQSrr766hEH1kMf/ehHSSQSHH/88bg7bW1t3HHHHfziF7/gq1/9KnV1dcyaNYubb76ZzZs385GPfITBwUEAvvzlLxctfitEE2vEG5vNB+a7+xNmNht4HDgPWAVsc/crzewKYJ67f3rYtS3AOmA52aTzOHDCSAkm1/Lly10bOYmUl+eee44jjjgi6jCqwkj/1mb2uLsvn8h9itbicPctwJbg8W4zew5YCJwLnBqcdhPwC+DTwy4/A7jP3bcBmNl9wJnALcWKt5Rt2bmXqx94kb6BwWl7z/aWJj5+2htVCkJEXmdaxjjMLA68GfgNcFCQVABeJduVNdxCYFPO847g2Ej3Xg2sBliyZEmBIi4tdz39Krc8+jIL5jZMyxd5urefHZk+zj9pCW2zZ45/gYhUlaInDjObBfwIuMzdd+V+8bm7m9mU+src/Xrgesh2VU3lXqUqmUozu2EG/3PFadOSOB58fisfufExXt6WVuKQaeHuat0WWSGHJYq6ctzM6sgmje+5++3B4deC8Y9wHGTrCJduBhbnPF8UHKtKiVSGeKx52v6P1R5ryr5vV2Za3k+qW0NDA6lUqqBfbLK/cD+OhoaGgtyvmLOqDLgBeM7dv57z0lrgw8CVwe+fjHD5PcCXzGxe8HwF8JlixVrqkqk0xyycO23vt2heEzWWfV+RYlu0aBEdHR10dnZGHUpFC3cALIRidlWdAvwl8LSZrQ+O/QPZhPFDM7sYSAIfADCz5cCl7v5Rd99mZl8EHguu+0I4UF5t+gYG6di+l/ceu2Da3rN+Rg0L5zWSSKnFIcVXV1dXkF3pZPoUc1bVI8BofSunj3D+OuCjOc/XAMVZ9lhGNm/fy8CgD3UfTZd4rFktDhEZkarjlriNwZf30tbmaX3feKyZjV1p9TuLyOsocZS4ZFc2cbTHpjdxtMea2NWdnZYrIpJLiaPEJVIZmutraZ1VP63vGw8SVULdVSIyjBJHiUum0rRP41TcULy1KXh/D
ZCLyP6UOEpcMpUZ+hKfTovmNWGmFoeIvJ4SRwnrHxhk0/bMtI9vADTU1bJgbqNaHCLyOkocJWzLzm76Bpz4NE/FDbXHmtTiEJHXUeIoYRsjmlEVao81k+hS4hCR/SlxlLBkRGs4Qktbm9ie6WOnpuSKSA4ljhKWSGVoqKvhwIgq1IYtneQ2tTpEZB8ljhKWTKWntSrucPvWcmiAXET2UeIoYYlUZtprVOVa0hKs5dA4h4jkUOIoUQODzsvBPhxRaayv5eA5DWpxiMh+lDhK1Ku7uukdGIxsRlWoPdakKrkish8ljhIVToONag1HKB5rVotDRPajxFGiwoV38Yim4obirc107elhd7em5IpIVtESh5mtMbOtZrYh59gPzGx98JPI2Rlw+LUJM3s6OG9dsWIsZclUhvoZNRw8pzB7BE9W2OJR6RERCRVz69gbgWuAm8MD7v7n4WMz+xqwc4zr3+XuXUWLrsQlutK0tzRRUxPNVNzQ0FqOVIajp3HfcxEpXcXcOvYhM4uP9JplFyZ8ADitWO9f7pKpaIobDhdOB1bNKhEJRTXG8Q7gNXd/YZTXHbjXzB43s9Vj3cjMVpvZOjNb19nZWfBAozA46CS3pSMfGAdonjmDttkzNbNKRIZElTguAG4Z4/W3u/vxwFnAx8zsnaOd6O7Xu/tyd1/e1tZW6DgjsXV3D919g7RHPDAeiseaNLNKRIZMe+IwsxnA+4AfjHaOu28Ofm8FfgycND3RlYahGVUl0OKA7DiHWhwiEoqixfHHwPPu3jHSi2bWbGazw8fACmDDSOdWqn1rOEqnxfHarh4yvf1RhyIiJaCY03FvAX4FHG5mHWZ2cfDS+QzrpjKzBWZ2V/D0IOARM3sSeBS4093vLlacpSiRylBXayw4oDHqUIB9a0k0JVdEoLizqi4Y5fiqEY69ApwdPH4JOK5YcZWDZCrN4pYmaiOeihuKD03JTXPE/DkRRyMiUdPK8RKUiLi44XBLhqbkqsUhIkocJcfdSabSkZZTH25OQx2x5noNkIsIoMRRcjr39JDpHSipFgdkFwImutTiEBEljpITDkCXUosDsuMcanGICChxlJyNJTYVN9Qea+aVnd109w1EHYqIREyJo8QkU2lm1BiL5pXGVNxQvDXbAnp5m7qrRKqdEkeJSaQyLJrXyIza0vqfJmwBJbT/uEjVK61vJwlmVJVWNxXkruVQi0Ok2ilxlBB3J9mVKZkaVbnmNtVxQFOdyquLiBJHKdmW7mV3T39JtjggLHaoFodItVPiKCHhyuxwILrUZMurq8UhUu2UOEpIuE6ilFscr+zYS0+/puSKVDMljhKS6EpTY7B4Xum2OAYdOrbvjToUEYmQEkcJSaQyLJzXSP2M0vyfJSyvrim5ItWtNL+hqlQylS65FeO5htZyaIBcpKoVcyOnNWa21cw25Bz7ZzPbbGbrg5+zR7n2TDP7nZm9aGZXFCvGUpNIZUquRlWueU11zG6YoZpVIlWumC2OG4EzRzh+lbsvC37uGv6imdUC/wacBRwJXGBmRxYxzpKwI9PLzr19Jd3iMDPisWa1OESqXNESh7s/BGybxKUnAS+6+0vu3gv8J3BuQYMrQYmhqrilmzggW7VXLQ6R6hbFGMfHzeypoCtr3givLwQ25TzvCI5VtPDLuBRXjeeKx5rp2L6XvoHBqEMRkYhMd+K4FngDsAzYAnxtqjc0s9Vmts7M1nV2dk71dpHZ2JXGDBa3lHbiaI81MTDobNaUXJGqNa2Jw91fc/cBdx8Evk22W2q4zcDinOeLgmOj3fN6d1/u7svb2toKG/A0SqYyLJjbSENdbdShjCmckrtR3VUiVWtaE4eZzc95+ifAhhFOeww41MyWmlk9cD6wdjrii1KixPYZH81QlVyt5RCpWsWcjnsL8CvgcDPrMLOLgf9rZk+b2VPAu4C/Cc5dYGZ3Abh7P/Bx4B7gOeCH7v5MseIsFclUpuQHxgFaZ9XTXF+rmVUiVWxGsW7s7heMcPiGUc59BTg75/ldwOum6laqnXv72JbuLfmBcchOyW3X/uMiVU0rx0vAy2UyFTcUb21SeXWRKqbEUQLCUuWlWk59uPZYM5u2Z+jXlFyRqqTEUQLCooHtLWXS4og10TfgbNnZHXUoIhIBJY4SkEhlOHhOA431pT0VN9Q+VOxQ4xwi1WjcwXEzW0R2Suw7gAXAXrLTaO8Efh6syZApSJbJVNzQ0pzy6u84tHzXzojI5IzZ4jCzfwfWAL3AV4ALgP8F3E+2gOEjZvbOYgdZ6RKpTEkXNxzuwNkzaair0ZRckSo1Xovja+4+0iK9DcDtwQK9JYUPq3rs6emna08P7WUyMA77quRqSq5IdRqzxTFK0sh9vdfdXyxsSNVlX3HD8mlxQLZmlVocItVpzBaHmT0N+Givu/uxBY+oyiSH1nCUT4sDsonuwec7GRh0amss6nBEZBqN11V1TvD7Y8Hv7wa/LyxOONUnnJlULov/Qu2xZnoHBnl1VzcLD2iMOhwRmUZjJg53TwKY2bvd/c05L11hZk8AVbOta7EkutK0zZ7JrJlFq/5SFGF5lGRXWolDpMrku47DzOyUnCcnT+BaGUN2RlV5dVMBtIdTcjXOIVJ18v0z92JgjZnNDZ7vAC4qSkRVJpkqz7UQ8+c0UD+jRosARapQvoljm7sfFyYOd99pZkuLGFdVyPT289qunrJscdTUGO0tTUPlUkSkeuTb3fQjyCYMd98ZHLutOCFVj5e3lVdV3OGy5dXVVSVSbcabjvsm4Chgrpm9L+elOUBDMQOrBomu7Jduua3hCMVjTTzyYieDg06NpuSKVI3xuqoOJzsl9wDgvTnHdwOXjHWhma0Jrt3q7kcHx74a3KcX+APwEXffMcK1ieA9BoB+d18+/kcpP+HivyVl2FUF2QHy7r5Btu7u4eC5+jtCpFqMNx33J8BPzOxt7v6rCd77RuAa4OacY/cBn3H3fjP7CvAZ4NOjXP8ud++a4HuWlUQqTUtzPXMb66IOZVLCsZlEKq3EIVJF8h3j+BMzm2NmdWb2gJl1mtkHx7rA3R8Ctg07dm+wpzjAr4FFEw+5ciS6ynMqbijsYlPNKpHqkm/iWOHuu8h2PSWANwKXT/G9LwJ+PsprDtxrZo+b2eqxbmJmq81snZmt6+zsnGJI0yuZSpft+AbAggMaqas1NnZpgFykmuSbOMK+lPcAt+bMrJoUM/ss0A98b5RT3u7uxwNnAR8bq3S7u1/v7svdfXlbW/msh+juG+CVnd1lO6MKoLbGWNzSpBaHSJXJN3H81MyeB04AHjCzNmBS+4aa2SqyLZcL3X3EAoruvjn4vRX4MXDSZN6rlG0KpuKWyz7jo4nHmrV6XKTK5JU43P0K4GRgubv3ARng3Im+mZmdCfw9sNLdR/y2MbNmM5sdPgZWkN3/o6IkUuW9hiPUHsu2OEb5G0BEKlDe9abcfZu7DwSP0+7+6ljnm9ktwK+Aw82sw8wuJjvLajZwn5mtN7PrgnMXmNldwaUHkd1Z8EngUeBOd797wp+sxO3bh6P8WxyZ3gE6
9/REHYqITJOilWR19wtGOHzDKOe+ApwdPH4JOK5YcZWKjV1p5jbWcUBTfdShTEm4j0gyleHA2ZqSK1INVOE2IslUhnhreXdTwb4puapZJVI98kocwdiEFFAilS77biqAhfMaqa0x1awSqSJjJg4ze4uZ1QJfyjn23TEukTz09A/wyo69ZT8wDlBXW8PieY1s1JRckaoxXovjfOC/gUPM7CtmdiFwfPHDqmwd2/cy6OU/MB7KVslV4hCpFuMljk+7+9uBl4GfAfOAg83s12b2g6JHV6GSZbrP+GjisSaSXRlNyRWpEuPNqrrbzAaANqCVbImQi9z9rWZW1XWmpmJfOfXKaXHs7ulnW7qX2KyZUYcjIkU2ZovD3U8DVgJ7gEOALwJvNLM7yHZjySQkU2lmz5xBS3N5T8UNhavftYJcpDqMO6vK3fcCm9z9a+7+F2T30bgE2Fjs4CrVxlSG9tYmzCpj86N2VckVqSr5lhw5LefpNe7e6e4/KlJMFa/cq+IOt2heIzWmFodItZjwAkB3H3H1t+Snb2CQju17KypxzJxRy4IDGrUIUKRKjLeO46dm9l4ze90WdWZ2iJl9wcwuKl54lWfz9r0MDPpQqY5KsbRVU3JFqsV4LY5LgHcAz5vZY2Z2l5n9l5m9BHwLeNzd1xQ9ygqSCIsbVkC5kVztsSZ1VYlUifH2HH+VbBn0vzezODAf2Av8frSy6DK25FA59cpqccRjzezc28eOTG/ZF24UkbHlXR3X3RNkt42VKUik0jTV19JWYesdwplViVSGZUocIhVN1XGnWaIrTXusuWKm4obiQ+XVNc4hUumUOKZZMpVhaZlvFzuSxS1NmO1bFS8ilSvvxGFmjWZ2+ERubmZrzGyrmW3IOdZiZveZ2QvB73mjXPvh4JwXzOzDE3nfUtU/MMim7ZmKqVGVq6GulvlzGtTiEKkC+e7H8V5gPXB38HyZma3N49IbgeF7eVwBPODuhwIPBM+Hv18L8DngLcBJwOdGSzDlZMvObvoGvGJqVA0Xb21WeXWRKpBvi+OfyX6B7wBw9/XA0vEucveHgG3DDp8L3BQ8vgk4b4RLzwDuC/Y53w7cx+sTUNlJVFhV3OGy5dXVVSVS6fJNHH3uvnPYscnW0D7I3bcEj18FDhrhnIXAppznHcGx1zGz1Wa2zszWdXZ2TjKk6RGuc6ikVeO54rEmtqV72bm3L+pQRKSI8k0cz5jZXwC1ZnaomX0D+OVU39yzGzhMaRMHd7/e3Ze7+/K2traphlRUya40DXU1HDi7sqbihsKW1MtqdYhUtHwTxyeAo4Ae4PvATuCySb7na2Y2HyD4vXWEczYDi3OeLwqOlbVEKkN7SzM1NZU1FTe0r7y6xjlEKtm4iSPYc/xOd/+su58Y/Pyju3dP8j3XAuEsqQ8DPxnhnHuAFWY2LxgUXxEcK2uJVHroy7USLWnRWg6RapDPfhwDwKCZzZ3ozc3sFuBXwOFm1mFmFwNXAu82sxeAPw6eY2bLzew7wXtuI7tp1GPBzxeCY2VrYNB5OZWp2PENgKb6GRw0Z6ZqVolUuHxLjuwBnjaz+4ChPyfd/ZNjXeTuF4zy0ukjnLsO+GjO8zVAxRRQfHVXN70DgxU7oyrUHmtWeXWRCpdv4rg9+JFJSgZfppW6hiO0NNbMA8+PNGwlIpUir8Th7jeZWT1wWHDod+6uOZcTEHbftFdYOfXh2lub6NrTw56efmbNzLuGpoiUkXxXjp8KvAD8G/BN4Pdm9s7ihVV5kqk09TNqmD+nIepQiiqu/cdFKl6+fxJ+DVjh7r8DMLPDgFuAE4oVWKVJpNIsaWmq2Km4ofahKrkZjlow4fkUIlIG8l3HURcmDQB3/z3wuu1kZXSJrkzFj29A7r4canGIVKp8E8c6M/uOmZ0a/HwbWFfMwCrJ4KCT3Jau6Km4oVkzZ9A6ayZJlVcXqVj5dlX9FfAxIJx++zDZsQ7Jw9bdPXT3DVb8wHgoHmtSi0OkguWbOGYA/+ruX4eh1eSVWXCpCMIv0WroqoJsefWHXyjtgpMiMnn5dlU9ADTmPG8E7i98OJUpOZQ4qqfF8dquHjK9/VGHIiJFkG/iaHD3PeGT4HF1/PlcAIlUhrpaY/7cyp6KGxqqkrtN4xwilSjfxJE2s+PDJ2Z2ArC3OCFVnmQqzeJ5TcyorY4t3sOWlfYfF6lM+Y5xXAbcamavAAYcDPx5sYKqNBu7MkPrG6rBkpiq5IpUsnxLjjxmZm8CDg8OqeRIntydZCrNWw9piTqUaTO3sY6W5npVyRWpUPmWHPkzsuMcG8juEf6D3K4rGV3nnh4yvQNVMzAeao81qcUhUqHy7XT/3+6+28zeTrYk+g3AtcULq3Ikw+KGVdRVBdlxjqRaHCIVKd/EMRD8fg/wbXe/E6gvTkiVJdFVXVNxQ/FYM6/s3Et338D4J4tIWck3cWw2s2+RHRC/y8xmTuDa/ZjZ4Wa2Pudnl5ldNuycU81sZ845/zSZ9yoFyVSG2hpj4bzG8U+uIPHWJtxhk6bkilScfGdVfQA4E/gXd99hZvOByyfzhkGxxGUwtAJ9M/DjEU592N3Pmcx7lJJEKs2ieY3UVclU3NC+YocZDj1odsTRiEgh5TurKkPODoDuvgXYUoD3Px34g7snC3CvkpRMZSp+u9iRxDUlV6RiRf1n8Plk9/UYydvM7Ekz+7mZHTXaDcxstZmtM7N1nZ2lVR/J3Ul0paumRlWuA5rqmdtYp2KHIhUossQRbEW7Erh1hJefANrd/TjgG8Ado93H3a939+Xuvrytra0osU7WtnQvu3v6q25gPBSPNWlmlUgFirLFcRbwhLu/NvwFd98V1sZy97uAOjNrne4ApypcABdvrb4WB2THOdTiEKk8USaOCxilm8rMDjYzCx6fRDbO1DTGVhBh/341jnFAtrz65u176e0fjDoUESmgSBKHmTUD7yZnwN3MLjWzS4On7wc2mNmTwNXA+e7u0x/p1CRSGWoMFlXZVNxQPNbEoMOm7equEqkk+U7HLSh3TwOxYceuy3l8DXDNdMdVaMlUmgUHNDJzRm3UoUQibGklU2ne0DYr4mhEpFCinlVV0RKpTNUOjMO+Kbkqry5SWZQ4iijRla66GlW5WprrmT1zhtZyiFQYJY4i2ZHpZefePpa2Vm+Lw8xob21SeXWRCqPEUSSJoaq41Zs4IPv51eIQqSxKHEUSfllW46rxXPFYEx3b99I3oCm5IpVCiaNIEl0ZzGBxS7Unjmb6B53N27VFvUilUOIokmQqzfw5DTTUVedU3FC8NaySq+4qkUqhxFEkiVS66sc3YN/Oh6pZJVI5lDiKJJHKVG2Nqlxts2bSVF+rFodIBVHiKIKde/vYlu6t6sV/ITMLZlapxSFSKZQ4iuBlTcXdTzzWpBaHSAVR4iiC8EtSXVVZ7bFmNm3LMDBYdnUqRWQEShxFEK7hWFLlU3FD8VgTfQPOKzs0JVekEihxFEEileGgOTNpqo+k+HDJ0ZRckcqixFEESU3F3U84SUA1q0QqQ5R7jifM7GkzW29m60Z43czsajN
70cyeMrPjo4hzMrLl1NVNFTpw9kwa6mpIdqnFIVIJou5LeZe7d43y2lnAocHPW4Brg98lbU9PP527e9TiyFFTY7S3NKvFIVIhSrmr6lzgZs/6NXCAmc2POqjxhAPj1VxOfSTtsSZVyRWpEFEmDgfuNbPHzWz1CK8vBDblPO8Iju3HzFab2TozW9fZ2VmkUPOXHFrDoa6qXPHWZpLbMgxqSq5I2Ysycbzd3Y8n2yX1MTN752Ru4u7Xu/tyd1/e1tZW2AgnIZw5pK6q/bXHmujtH+TVXd1RhyIiUxRZ4nD3zcHvrcCPgZOGnbIZWJzzfFFwrKQluzK0zprJrJlRDx+VlqXhzCoNkIuUvUgSh5k1m9ns8DGwAtgw7LS1wIeC2VVvBXa6+5ZpDnXCEqm0ZlSNoL1VU3JFKkVUfxYfBPzYzMIYvu/ud5vZpQDufh1wF3A28CKQAT4SUawTkkxlOOWNrVGHUXLmz2mgfkaNBshFKkAkicPdXwKOG+H4dTmPHfjYdMY1VXt7B3h1V7daHCOoqTGWtKjYoUglKOXpuGUnuS0sbqiB8ZHEY00qry5SAZQ4CijRlf1S1D4cI2uPNZNIpck2JkWkXClxFNBQVVx1VY0oHmuiu2+Qrbt7og5FRKZAiaOAEqkMLc31zG2sizqUkhSubdmoKbkiZU2Jo4CyVXHV2hhNWIZFM6tEypsSRwElUxmNb4xh/twG6mpNazlEypwSR4F09w3wys69anGMYUZtDYvnqdihSLlT4iiQTdsyuGtG1XjaY01Ds89EpDwpcRRI2P2iNRxja481k9SUXJGypsRRIGH3i1aNjy0eayLdO0DXnt6oQxGRSVLiKJBEKs3cxjoOaKqPOpSS1q6ZVSJlT4mjQJLaZzwvS7WWQ6TsKXEUSCKV1uZNeVg4r5HaGlPNKpEypsRRAL39g2zevlctjjzU1dawaF6jquSKlDEljgLYtD3DoGu72HxlZ1apxSFSrpQ4CmBoRpWm4uYlHmtSlVyRMjbticPMFpvZg2b2rJk9Y2Z/PcI5p5rZTjNbH/z803THORH7yqmrqyof7bFmdnf3sz3TF3UoIjIJUewA2A/8rbs/Eew7/riZ3efuzw4772F3PyeC+CYsmUoze+YMWpo1FTcfYYJNpNL6NxMpQ9Pe4nD3Le7+RPB4N/AcsHC64yikRCpDe2sTwR7qMo5wLEhrOUTKU6RjHGYWB94M/GaEl99mZk+a2c/N7Kgx7rHazNaZ2brOzs5ihTqmpKbiTsjilkZqDDaqZpVIWYoscZjZLOBHwGXuvmvYy08A7e5+HPAN4I7R7uPu17v7cndf3tbWVrR4R9M3MEiHpuJOyMwZtSw4oFEtDpEyFUniMLM6sknje+5++/DX3X2Xu+8JHt8F1JlZ6zSHmZfN2/fSP+hqcUxQPNasfTlEylQUs6oMuAF4zt2/Pso5BwfnYWYnkY0zNX1R5i9cyLZUU3EnpD2mfTlEylUUs6pOAf4SeNrM1gfH/gFYAuDu1wHvB/7KzPqBvcD5XqKT/sOFbNrAaWLisWZ2ZPrYkelVYUiRMjPticPdHwHGnH7k7tcA10xPRFOTSKVpqq+lbdbMqEMpK2GiTaYyShwiZUYrx6comcrQHmvWVNwJClfZq2aVSPlR4piiRCqtGVWTsKSlCTO0jaxIGVLimIKBQWfTtoxmVE1CQ10t8+c0aIBcpAwpcUzBKzv20jfganFMUnusWV1VImVIiWMKwi89tTgmJ97apPLqImVIiWMKwgVsWsMxOe2xZlLpXnZ1q0quSDlR4piCZFeahroaDpytqbiTEXbxvaxWh0hZUeKYgkQqQ3tLMzU1moo7GWEXn8Y5RMqLEscUZKviamB8snIXAYpI+VDimKTBQSe5LaPtYqegqX4GB82ZycYutThEyokSxyS9uqub3v5BtTimqD3WrLUcImVGiWOSEsFfyXFNxZ2SeKxJ5dVFyowSxySFX3bqqpqa9lgznbt7SPf0Rx2KiORJiWOSkqk09TNqmD+nIepQylp8aP9xtTpEyoUSxyQlUmmWtDRpKu4U7ZtZpXEOkXKhxDFJyVRGNaoKIEwcGucQKR9R7Tl+ppn9zsxeNLMrRnh9ppn9IHj9N2YWjyDMUbk7iVRaNaoKYHZDHa2z6ocmG4hI6Ytiz/Fa4N+As4AjgQvM7Mhhp10MbHf3NwJXAV+Z3ijHtnV3D919g2pxFEhcVXJFykoUe46fBLzo7i8BmNl/AucCz+accy7wz8Hj24BrzMxKZd/x8K9jtTgKoz3WzO2/7eCof7o76lBESsJ3P/oWjl8yL+owRhVF4lgIbMp53gG8ZbRz3L3fzHYCMaBr+M3MbDWwOnjaY2YbCh7xKP5oettBrYzw+SuIPl950+croBO+OF3vBMDhE70gisRRUO5+PXA9gJmtc/flEYdUFJX82UCfr9zp85UvM1s30WuiGBzfDCzOeb4oODbiOWY2A5gLpKYlOhERGVMUieMx4FAzW2pm9cD5wNph56wFPhw8fj/wX6UyviEiUu2mvasqGLP4OHAPUAuscfdnzOwLwDp3XwvcAHzXzF4EtpFNLvm4vihBl4ZK/mygz1fu9PnK14Q/m+kPeRERmQitHBcRkQlR4hARkQmpiMQxXgmTcmZmi83sQTN71syeMbO/jjqmQjOzWjP7rZn9LOpYisHMDjCz28zseTN7zszeFnVMhWJmfxP8d7nBzG4xs7IuF21ma8xsa+56MDNrMbP7zOyF4Hfprswbxyif76vBf5tPmdmPzeyA8e5T9okjzxIm5awf+Ft3PxJ4K/CxCvt8AH8NPBd1EEX0r8Dd7v4m4Dgq5LOa2ULgk8Bydz+a7GSXfCeylKobgTOHHbsCeMDdDwUeCJ6Xqxt5/ee7Dzja3Y8Ffg98ZryblH3iIKeEibv3AmEJk4rg7lvc/Yng8W6yXzoLo42qcMxsEfAe4DtRx1IMZjYXeCfZmYK4e6+774g0qMKaATQG662agFcijmdK3P0hsjM5c50L3BQ8vgk4bzpjKqSRPp+73+vu4U5qvya7tm5MlZA4RiphUjFfrLmCKsFvBn4TcSiF9P+AvwcGI46jWJYCncC/B91x3zGziihy5u6bgX8BXga2ADvd/d5ooyqKg9x9S/D4VeCgKIMpsouAn493UiUkjqpgZrOAHwGXufuuqOMpBDM7B9jq7o9HHUsRzQCOB6519zcDacq7q2NI0Nd/LtnkuABoNrMPRhtVcQULkStyDYOZfZZs1/j3xju3EhJHPiVMypqZ1ZFNGt9z99ujjqeATgFWmlmCbBfjaWb2H9GGVHAdQIe7h63E28gmkkrwx8BGd+909z7gduDkiGMqhtfMbD5A8HtrxPEUnJmtAs4BLsynSkclJI58SpiULTMzsv3jz7n716OOp5Dc/TPuvsjd42T/d/svd6+ov1jd/VVgk5mFFUhPZ/8tBMrZy8Bbzawp+O/0dCpk4H+Y3BJIHwZ+EmEsBWdmZ5LtLl7p7nltxVn2iSMY1AlLmDwH/NDdn4k2qoI6BfhLsn+Nrw9+zo46KJmQTwDfM7OngG
XAl6INpzCCVtRtwBPA02S/T8q6NIeZ3QL8CjjczDrM7GLgSuDdZvYC2VbWlVHGOBWjfL5rgNnAfcH3y3Xj3kclR0REZCLKvsUhIiLTS4lDREQmRIlDREQmRIlDREQmRIlDREQmRIlDZJqZ2WVm1pTz/K58KpJO4P7NZnZ/8PiRoI6USMEocYgUmGWN9f+ty8gWBATA3c8ucOHDtwG/CkqCpHMK2IkUhBKHVAUz+9/Bni2PBPtG/F1w/A1mdreZPW5mD5vZm4LjN5rZ1Wb2SzN7yczen3Ovy83ssWD/gs8Hx+LB/W8GNgCLzexaM1sX7FcRnvdJsnWdHjSzB4NjCTNrDR5/KtjbYoOZXZZz7+fM7NvBve41s8YRPuMbzGw98B/AXwCPA8cFi7oOLM6/rFQld9ePfir6BzgRWA80kF0h+wLwd8FrDwCHBo/fQrbsCWT3LbiV7B9XR5It3Q+wguzqaAte+xnZsulxshV+35rzvi3B71rgF8CxwfME0JpzXgJoBU4guwK7GZgFPEO2GnKcbPG5ZcH5PwQ+OMbnvROIAZ8D3hP1v79+Ku9HfZ9SDU4BfuLu3UC3mf0UhioOnwzcmi21BMDMnOvucPdB4FkzC0tprwh+fhs8nwUcSrZuU9Ldf51z/QfMbDXZCrnzySagp8aI8+3Aj909HcR3O/AOsrWSNrr7+uC8x8kmk9Ec6O4pMzuWYB8QkUJS4pBqVgPscPdlo7zek/PYcn5/2d2/lXtisFdKOuf5UuDvgBPdfbuZ3Ui2xTNZubEMACN1VV1HNvksCrqsDgV+ZmY3uftVU3hvkf1ojEOqwf8A7zWzhqCVcQ6AZ/c12WhmfwZDg9rHjXOve4CLgvtgZgtHGT+YQzaR7AxaK2flvLabbJfZcA8D5wXVZpuBPwmO5cXdLwU+D3yR7C51d7r7MiUNKTS1OKTiuftjZraWbDfRa2THEXYGL18IXGtm/wjUkd0X5Mkx7nWvmR1BdtYSwB7gg2RbAbnnPWlmvwWeJ7tD5f/kvHw9cLeZveLu78q55omgZfJocOg77v7boDWTrz8CbibbxfXfE7hOJG+qjitVwcxmufueYP3EQ8BqD/ZyF5GJUYtDqsX1ZnYk2XGGm5Q0RCZPLQ4REZkQDY6LiMiEKHGIiMiEKHGIiMiEKHGIiMiEKHGIiMiE/H8VkEFqGYdxdgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_fitness(log)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "CMA versus IBEA \n", + "-------------------------" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As the present package proposes both the IBEA and CMA evolutionary strategies, one might ask: Which one is the best?\n", + "\n", + "Unfortunately, there is no definitive answer to this question.\n", + "\n", + "From a theoretical point of view, the advantages of the CMA strategy seem strong:\n", + "- In IBEA, the creation of a new generation is performed through random mating and mutation. However, due to the lack of a learning rate, this leads to a lack of convergence in the latter stage of the optimisation as the models will jump around an optimal solution without being able to reach it. In CMA, the `sigma` parameter, which is the width of the distribution from which the models should be drawn, decreases once the optimisation finds a bassin in the fitness landscape, leading to smoother convergence.\n", + "- In IBEA, as the new generation only depends on the latest one, the knowledge contained in the previous generations is almost completely lost. In CMA, the covariance matrix continusly evolves, taking into account the results of each generation, leading to an accumulation of past knowledge about the shape of the local fitness landscape.\n", + "- The ideal CMA population size, computed as int(4 + 3 * log(dimension_parameter_space)) is often one or two order of magnitude smaller than the population size needed by IBEA to reach the same results. This results in less compute per generation for the CMA strategy.\n", + "\n", + "However, CMA is not without drawbacks:\n", + "- It is frequent for the CMA strategy (especially the SO-CMA) to converge too quickly and thus get stuck in sub-optimal minima. Therefore, to achieve the exploration level displayed by the IBEA strategy, it might be needed to run several CMA optimisations in parallel and pool the results.\n", + "- Although the population size is much smaller when using the CMA strategy, a proper convergence might require many more generations than for the IBEA strategy, thus nulliying the advantage of the small generation in term of compute.\n", + "\n", + "Overall, CMA makes a more clever use of the information available, but IBEA is not to be neglected, especially if more compute power is available." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "lfpyenv_new", + "language": "python", + "name": "lfpyenv_new" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/requirements_docs.txt b/requirements_docs.txt index 72ef1d11..dfce7960 100644 --- a/requirements_docs.txt +++ b/requirements_docs.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2021, EPFL/Blue Brain Project +# Copyright (c) 2016-2022, EPFL/Blue Brain Project # # This file is part of BluePyOpt # This library is free software; you can redistribute it and/or modify it under