diff --git a/src/evox/algorithms/de_variants/de.py b/src/evox/algorithms/de_variants/de.py
index f377955ee..467fc875f 100644
--- a/src/evox/algorithms/de_variants/de.py
+++ b/src/evox/algorithms/de_variants/de.py
@@ -82,15 +82,15 @@ def __init__(
         # Initialize population
         if mean is not None and stdev is not None:
             # Initialize population using a normal distribution
-            population = mean + stdev * torch.randn(self.pop_size, self.dim, device=device)
-            population = clamp(population, lb=self.lb, ub=self.ub)
+            pop = mean + stdev * torch.randn(self.pop_size, self.dim, device=device)
+            pop = clamp(pop, lb=self.lb, ub=self.ub)
         else:
             # Initialize population uniformly within bounds
-            population = torch.rand(self.pop_size, self.dim, device=device)
-            population = population * (self.ub - self.lb) + self.lb
+            pop = torch.rand(self.pop_size, self.dim, device=device)
+            pop = pop * (self.ub - self.lb) + self.lb
 
         # Mutable attributes to store population and fitness
-        self.population = Mutable(population)
+        self.pop = Mutable(pop)
         self.fitness = Mutable(torch.empty(self.pop_size, device=device).fill_(float("inf")))
 
     def init_step(self):
@@ -99,7 +99,7 @@ def init_step(self):
         This method evaluates the fitness of the initial population and then calls the `step` method
         to perform the first optimization iteration.
         """
-        self.fitness = self.evaluate(self.population)
+        self.fitness = self.evaluate(self.pop)
         self.step()
 
     def step(self):
@@ -113,7 +113,7 @@ def step(self):
 
         The method ensures that all new population vectors are clamped within the specified bounds.
         """
-        device = self.population.device
+        device = self.pop.device
         num_vec = self.num_difference_vectors * 2 + (0 if self.best_vector else 1)
         random_choices = []
@@ -127,36 +127,33 @@ def step(self):
         if self.best_vector:
             # Use the best individual as the base vector
             best_index = torch.argmin(self.fitness)
-            base_vector = self.population[best_index][None, :]
+            base_vector = self.pop[best_index][None, :]
             start_index = 0
         else:
             # Use randomly selected individuals as base vectors
-            base_vector = self.population[random_choices[0]]
+            base_vector = self.pop[random_choices[0]]
             start_index = 1
 
         # Generate difference vectors by subtracting randomly chosen population vectors
         difference_vector = torch.stack(
-            [
-                self.population[random_choices[i]] - self.population[random_choices[i + 1]]
-                for i in range(start_index, num_vec - 1, 2)
-            ]
+            [self.pop[random_choices[i]] - self.pop[random_choices[i + 1]] for i in range(start_index, num_vec - 1, 2)]
         ).sum(dim=0)
 
         # Create mutant vectors by adding weighted difference vectors to the base vector
-        new_population = base_vector + self.differential_weight * difference_vector
+        new_pop = base_vector + self.differential_weight * difference_vector
 
         # Crossover: Determine which dimensions to crossover based on the crossover probability
         cross_prob = torch.rand(self.pop_size, self.dim, device=device)
         random_dim = torch.randint(0, self.dim, (self.pop_size, 1), device=device)
         mask = cross_prob < self.cross_probability
         mask = mask.scatter(dim=1, index=random_dim, value=1)
-        new_population = torch.where(mask, new_population, self.population)
+        new_pop = torch.where(mask, new_pop, self.pop)
 
         # Ensure new population is within bounds
-        new_population = clamp(new_population, self.lb, self.ub)
+        new_pop = clamp(new_pop, self.lb, self.ub)
 
         # Selection: Evaluate fitness of the new population and select the better individuals
-        new_fitness = self.evaluate(new_population)
+        new_fitness = self.evaluate(new_pop)
         compare = new_fitness < self.fitness
-        self.population = torch.where(compare[:, None], new_population, self.population)
+        self.pop = torch.where(compare[:, None], new_pop, self.pop)
         self.fitness = torch.where(compare, new_fitness, self.fitness)
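Reviewer note on the hunks above: `step` is the standard DE mutate-crossover-select cycle, just with `population` renamed to `pop`. Below is a minimal self-contained sketch of that cycle (DE/rand/1/bin), assuming a toy sphere objective and plain `torch`; `de_step` and `sphere` are names local to this sketch, not EvoX API.

import torch

def sphere(x: torch.Tensor) -> torch.Tensor:
    # Toy minimization objective, assumed for this sketch only
    return (x**2).sum(dim=1)

def de_step(pop: torch.Tensor, fitness: torch.Tensor, f: float = 0.5, cr: float = 0.9):
    # One DE/rand/1/bin generation over a (pop_size, dim) population
    pop_size, dim = pop.shape
    # Mutation: a random base vector plus one weighted difference vector
    idx = torch.stack([torch.randperm(pop_size)[:3] for _ in range(pop_size)])
    mutant = pop[idx[:, 0]] + f * (pop[idx[:, 1]] - pop[idx[:, 2]])
    # Binomial crossover: take each dimension from the mutant with probability cr,
    # forcing at least one mutant dimension per individual (as the hunk above does)
    mask = torch.rand(pop_size, dim) < cr
    mask = mask.scatter(1, torch.randint(0, dim, (pop_size, 1)), True)
    trial = torch.where(mask, mutant, pop)
    # Selection: keep parent or trial, whichever has the lower fitness
    trial_fitness = sphere(trial)
    better = trial_fitness < fitness
    return torch.where(better[:, None], trial, pop), torch.where(better, trial_fitness, fitness)

pop = torch.rand(32, 8) * 10 - 5  # uniform init in [-5, 5], mirroring the bounds branch
fit = sphere(pop)
for _ in range(100):
    pop, fit = de_step(pop, fit)
print(fit.min())  # decreases toward 0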
diff --git a/src/evox/algorithms/es_variants/open_es.py b/src/evox/algorithms/es_variants/open_es.py
index 16c3e63d2..acc6a3e02 100644
--- a/src/evox/algorithms/es_variants/open_es.py
+++ b/src/evox/algorithms/es_variants/open_es.py
@@ -65,8 +65,8 @@ def step(self):
             noise = torch.cat([noise, -noise], dim=0)
         else:
             noise = torch.randn(self.pop_size, self.dim, device=device)
-        population = self.center[None, :] + self.noise_stdev * noise
-        fitness = self.evaluate(population)
+        pop = self.center[None, :] + self.noise_stdev * noise
+        fitness = self.evaluate(pop)
         grad = noise.T @ fitness / self.pop_size / self.noise_stdev
         if self.optimizer is None:
             center = self.center - self.learning_rate * grad
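The renamed lines sit inside the OpenES gradient estimate. Here is a free-standing sketch of that estimate under simplifying assumptions (mirrored sampling on, no `optimizer` branch, a sphere objective, plain SGD on the center); `open_es_step` is a name invented for this sketch.

import torch

def open_es_step(center: torch.Tensor, pop_size: int, noise_stdev: float, lr: float) -> torch.Tensor:
    half = torch.randn(pop_size // 2, center.shape[0])
    noise = torch.cat([half, -half], dim=0)      # mirrored (antithetic) sampling
    pop = center[None, :] + noise_stdev * noise  # perturbed candidates around the center
    fitness = (pop**2).sum(dim=1)                # sphere objective, minimized
    # Monte-Carlo estimate of the gradient of expected fitness w.r.t. the center
    grad = noise.T @ fitness / pop_size / noise_stdev
    return center - lr * grad                    # plain SGD step on the center

center = torch.full((8,), 3.0)
for _ in range(200):
    center = open_es_step(center, pop_size=64, noise_stdev=0.1, lr=0.05)
print((center**2).sum())  # approaches 0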
""" - fitness = self.evaluate(self.population) + fitness = self.evaluate(self.pop) if self.iteration < 0.9 * self.max_iteration: self._update_strategy_1(fitness) else: @@ -114,7 +114,7 @@ def step(self): @trace_impl(step) def trace_step(self): - fitness = self.evaluate(self.population) + fitness = self.evaluate(self.pop) cond = self.iteration < 0.9 * self.max_iteration branches = (self._update_strategy_1, self._update_strategy_2) state, names = self.prepare_control_flow(*branches) @@ -127,33 +127,33 @@ def _update_strategy_1(self, fitness: torch.Tensor): self._cond_regroup(fitness) # Update personal_best compare = self.personal_best_fitness > fitness - personal_best_location = torch.where(compare[:, None], self.population, self.personal_best_location) + personal_best_location = torch.where(compare[:, None], self.pop, self.personal_best_location) personal_best_fitness = torch.where(compare, fitness, self.personal_best_fitness) # Update dynamic swarms dynamic_size = self.dynamic_sub_swarm_size * self.dynamic_sub_swarms_num dynamic_size_tuple = (self.dynamic_sub_swarms_num, self.dynamic_sub_swarm_size) - dynamic_swarms_location = self.population[:dynamic_size, :].view(*dynamic_size_tuple, self.dim) + dynamic_swarms_location = self.pop[:dynamic_size, :].view(*dynamic_size_tuple, self.dim) dynamic_swarms_fitness = fitness[:dynamic_size].view(*dynamic_size_tuple) dynamic_swarms_velocity = self.velocity[:dynamic_size, :].view(*dynamic_size_tuple, self.dim) dynamic_swarms_pbest = personal_best_location[:dynamic_size, :].view(*dynamic_size_tuple, self.dim) # Update following swarm - following_swarm_location = self.population[dynamic_size:, :] + following_swarm_location = self.pop[dynamic_size:, :] following_swarm_velocity = self.velocity[dynamic_size:, :] following_swarm_pbest = personal_best_location[dynamic_size:, :] # Update local_best local_best_fitness, local_best_index = torch.min(dynamic_swarms_fitness, dim=1) # shape:(dynamic_sub_swarms_num,) local_best_location = torch.index_select(dynamic_swarms_location, 1, local_best_index).diagonal().T # Update regional_best - regional_best_location = self.population[self.regional_best_index, :] + regional_best_location = self.pop[self.regional_best_index, :] # Calculate Dynamic Swarms Velocity - rand_pbest = torch.rand(self.pop_size, self.dim, device=self.population.device) + rand_pbest = torch.rand(self.pop_size, self.dim, device=self.pop.device) rand_lbest = torch.rand( self.dynamic_sub_swarms_num, self.dynamic_sub_swarm_size, self.dim, - device=self.population.device, + device=self.pop.device, ) - rand_rbest = torch.rand(self.following_sub_swarm_size, self.dim, device=self.population.device) + rand_rbest = torch.rand(self.following_sub_swarm_size, self.dim, device=self.pop.device) dynamic_swarms_rand_pbest = rand_pbest[:dynamic_size, :].view(*dynamic_size_tuple, self.dim) dynamic_swarms_velocity = ( self.w * dynamic_swarms_velocity @@ -170,8 +170,8 @@ def _update_strategy_1(self, fitness: torch.Tensor): # Update Population dynamic_swarms_velocity = dynamic_swarms_velocity.view(dynamic_size, self.dim) velocity = torch.cat([dynamic_swarms_velocity, following_swarm_velocity], dim=0) - population = self.population + velocity - self.population = clamp(population, self.lb, self.ub) + pop = self.pop + velocity + self.pop = clamp(pop, self.lb, self.ub) self.velocity = clamp(velocity, self.lb, self.ub) self.personal_best_location = personal_best_location self.personal_best_fitness = personal_best_fitness @@ -193,10 +193,10 @@ def _regroup(self, fitness: 
         sort_index = torch.argsort(fitness, dim=0)
         dynamic_size = self.dynamic_sub_swarm_size * self.dynamic_sub_swarms_num
         dynamic_swarm_population_index = sort_index[:dynamic_size]
-        dynamic_swarm_population_index = torch.randperm(dynamic_size, device=self.population.device)
+        dynamic_swarm_population_index = torch.randperm(dynamic_size, device=self.pop.device)
         regroup_index = torch.cat([dynamic_swarm_population_index, sort_index[dynamic_size:]])
 
-        population = self.population[regroup_index]
+        pop = self.pop[regroup_index]
         velocity = self.velocity[regroup_index]
         personal_best_location = self.personal_best_location[regroup_index]
         personal_best_fitness = self.personal_best_fitness[regroup_index]
@@ -204,7 +204,7 @@ def _regroup(self, fitness: torch.Tensor):
         dynamic_swarm_fitness = fitness[:dynamic_size]
         regional_best_index = torch.argsort(dynamic_swarm_fitness, dim=0)[: self.following_sub_swarm_size]
 
-        self.population = population
+        self.pop = pop
         self.velocity = velocity
         self.personal_best_location = personal_best_location
         self.personal_best_fitness = personal_best_fitness
@@ -213,21 +213,21 @@ def _update_strategy_2(self, fitness: torch.Tensor):
         # Update personal_best
         compare = self.personal_best_fitness > fitness
-        personal_best_location = torch.where(compare[:, None], self.population, self.personal_best_location)
+        personal_best_location = torch.where(compare[:, None], self.pop, self.personal_best_location)
         personal_best_fitness = torch.where(compare, fitness, self.personal_best_fitness)
         # Update global_best
         global_best_fitness, global_best_idx = torch.min(personal_best_fitness, dim=0)
         global_best_location = personal_best_location[global_best_idx]
-        rand_pbest = torch.rand(self.pop_size, self.dim, device=self.population.device)
-        rand_gbest = torch.rand(self.pop_size, self.dim, device=self.population.device)
+        rand_pbest = torch.rand(self.pop_size, self.dim, device=self.pop.device)
+        rand_gbest = torch.rand(self.pop_size, self.dim, device=self.pop.device)
         velocity = (
             self.w * self.velocity
-            + self.c_pbest * rand_pbest * (personal_best_location - self.population)
-            + self.c_gbest * rand_gbest * (global_best_location - self.population)
+            + self.c_pbest * rand_pbest * (personal_best_location - self.pop)
+            + self.c_gbest * rand_gbest * (global_best_location - self.pop)
         )
-        population = self.population + velocity
+        pop = self.pop + velocity
         # Update population
-        self.population = clamp(population, self.lb, self.ub)
+        self.pop = clamp(pop, self.lb, self.ub)
         self.velocity = clamp(velocity, self.lb, self.ub)
         self.personal_best_location = personal_best_location
         self.personal_best_fitness = personal_best_fitness
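Most of the renamed lines in this file belong to `_regroup` and the two update strategies. One reading of the regrouping scheme — sort by fitness, shuffle the best-ranked individuals into the dynamic sub-swarms, leave the rest as the following swarm — sketched outside the class. Note the method itself overwrites the sorted index with a plain `randperm`, so this is an illustration of the intent, not a transcript of the code path; all names here are local to the sketch.

import torch

def regroup(pop: torch.Tensor, fitness: torch.Tensor, num_swarms: int, sub_swarm_size: int):
    dynamic_size = num_swarms * sub_swarm_size
    sort_index = torch.argsort(fitness)
    # Shuffle the best-ranked indices so each dynamic sub-swarm gets a random mix
    shuffled = sort_index[:dynamic_size][torch.randperm(dynamic_size)]
    regroup_index = torch.cat([shuffled, sort_index[dynamic_size:]])
    pop = pop[regroup_index]
    dynamic_swarms = pop[:dynamic_size].reshape(num_swarms, sub_swarm_size, -1)
    following_swarm = pop[dynamic_size:]
    return pop, dynamic_swarms, following_swarm

pop = torch.rand(50, 10)
fitness = torch.rand(50)
pop, dyn, follow = regroup(pop, fitness, num_swarms=4, sub_swarm_size=10)
print(dyn.shape, follow.shape)  # torch.Size([4, 10, 10]) torch.Size([10, 10])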
""" compare = self.local_best_fit > self.fit - self.local_best_location = torch.where(compare[:, None], self.population, self.local_best_location) + self.local_best_location = torch.where(compare[:, None], self.pop, self.local_best_location) self.local_best_fit = torch.where(compare, self.fit, self.local_best_fit) self.global_best_location, self.global_best_fit = min_by( - [self.global_best_location.unsqueeze(0), self.population], + [self.global_best_location.unsqueeze(0), self.pop], [self.global_best_fit.unsqueeze(0), self.fit], ) rg = torch.rand(self.pop_size, self.dim, device=self.fit.device) rp = torch.rand(self.pop_size, self.dim, device=self.fit.device) velocity = ( self.w * self.velocity - + self.phi_p * rp * (self.local_best_location - self.population) - + self.phi_g * rg * (self.global_best_location - self.population) + + self.phi_p * rp * (self.local_best_location - self.pop) + + self.phi_g * rg * (self.global_best_location - self.pop) ) - population = self.population + velocity - self.population = clamp(population, self.lb, self.ub) + pop = self.pop + velocity + self.pop = clamp(pop, self.lb, self.ub) self.velocity = clamp(velocity, self.lb, self.ub) - self.fit = self.evaluate(self.population) + self.fit = self.evaluate(self.pop) def init_step(self): """Perform the first step of the PSO optimization. See `step` for more details. """ - self.fit = self.evaluate(self.population) + self.fit = self.evaluate(self.pop) self.local_best_fit = self.fit self.global_best_fit = torch.min(self.fit)