Commit 6aeb685

Support of NSGA-II
1 parent: 3163d7f

5 files changed: +137 additions, −45 deletions

pygad/helper/nsga2.py

Lines changed: 4 additions & 3 deletions
```diff
@@ -36,8 +36,8 @@ def get_non_dominated_set(curr_solutions):
     # Checking for if any solution dominates the current solution by applying the 2 conditions.
     # le_eq (less than or equal): All elements must be True.
     # le (less than): Only 1 element must be True.
-    le_eq = two_solutions[:, 1] <= two_solutions[:, 0]
-    le = two_solutions[:, 1] < two_solutions[:, 0]
+    le_eq = two_solutions[:, 1] >= two_solutions[:, 0]
+    le = two_solutions[:, 1] > two_solutions[:, 0]

     # If the 2 conditions hold, then a solution dominates the current solution.
     # The current solution is not considered a member of the dominated set.
```
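
The corrected operators implement standard Pareto dominance for maximized fitness: a solution dominates another when it is at least as good on every objective and strictly better on at least one. A minimal standalone sketch of that test (illustrative only, not the helper's exact code):

```python
import numpy

def dominates(other, current):
    """Return True if `other` dominates `current` (both maximized).

    Both arguments are 1-D arrays of objective fitness values.
    """
    other = numpy.asarray(other)
    current = numpy.asarray(current)
    # Condition 1: other is at least as good on every objective.
    # Condition 2: other is strictly better on at least one objective.
    return numpy.all(other >= current) and numpy.any(other > current)

# (2, 5) dominates (1, 5); (1, 5) and (5, 1) do not dominate each other.
assert dominates([2, 5], [1, 5])
assert not dominates([1, 5], [5, 1]) and not dominates([5, 1], [1, 5])
```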
```diff
@@ -175,8 +175,9 @@ def crowding_distance(pareto_front, fitness):
         # If there are only 2 solutions in the current pareto front, then do not proceed.
         # The crowding distance for such 2 solutions is infinity.
         if len(obj_sorted) <= 2:
+            obj_crowding_dist_list.append(obj_sorted)
             break
-
+
         for idx in range(1, len(obj_sorted)-1):
             # Calculate the crowding distance.
             crowding_dist = obj_sorted[idx+1][1] - obj_sorted[idx-1][1]
```
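
The added `append` keeps fronts of one or two solutions represented in `obj_crowding_dist_list` before the early `break`; by NSGA-II convention such boundary solutions get infinite crowding distance. A rough, self-contained illustration of the crowding-distance idea (simplified relative to the actual helper):

```python
import numpy

def crowding_distances(values):
    """Per-solution crowding distance for one objective of one Pareto front.

    `values` is a 1-D array of that objective's fitness over the front.
    Boundary solutions receive infinity so they are always preferred.
    """
    n = len(values)
    distances = numpy.zeros(n)
    order = numpy.argsort(values)
    distances[order[0]] = distances[order[-1]] = float('inf')
    span = values[order[-1]] - values[order[0]]
    if n > 2 and span > 0:
        for rank in range(1, n - 1):
            # Gap between the two neighbors of this solution, normalized by the range.
            distances[order[rank]] = (values[order[rank + 1]] - values[order[rank - 1]]) / span
    return distances

print(crowding_distances(numpy.array([1.0, 3.0, 2.0, 4.0])))
# approximately [inf 0.6667 0.6667 inf]
```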

pygad/pygad.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -2099,7 +2099,7 @@ def run(self):
                                     :] = self.last_generation_offspring_mutation
                 else:
                     self.last_generation_elitism, self.last_generation_elitism_indices = self.steady_state_selection(self.last_generation_fitness,
-                                                                                                                     num_parents=self.keep_elitism)
+                                                                                                                      num_parents=self.keep_elitism)
                     self.population[0:self.last_generation_elitism.shape[0],
                                     :] = self.last_generation_elitism
                     self.population[self.last_generation_elitism.shape[0]:, :] = self.last_generation_offspring_mutation
```

The only change here is re-aligning the wrapped `num_parents` keyword argument; behavior is unchanged.

pygad/utils/mutation.py

Lines changed: 32 additions & 5 deletions
```diff
@@ -474,7 +474,8 @@ def adaptive_mutation_population_fitness(self, offspring):
         if len(fitness.shape) > 1:
             # TODO This is a multi-objective optimization problem.
             # fitness[first_idx:last_idx] = [0]*(last_idx - first_idx)
-            raise ValueError('Edit adaptive mutation to work with multi-objective optimization problems.')
+            fitness[first_idx:last_idx] = numpy.zeros(shape=(last_idx - first_idx, fitness.shape[1]))
+            # raise ValueError('Edit adaptive mutation to work with multi-objective optimization problems.')
         else:
             # This is a single-objective optimization problem.
             fitness[first_idx:last_idx] = [0]*(last_idx - first_idx)
```
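
With a 2-D fitness array, the placeholder assignment must match the slice's shape, one row per solution and one column per objective. A quick shape check:

```python
import numpy

# Hypothetical 2-D fitness: 6 solutions, 2 objectives each.
fitness = numpy.ones(shape=(6, 2))
first_idx, last_idx = 2, 5

# Zero the rows of the solutions whose fitness will be recomputed.
fitness[first_idx:last_idx] = numpy.zeros(shape=(last_idx - first_idx, fitness.shape[1]))

print(fitness[first_idx:last_idx].shape)  # (3, 2) — shapes agree, no broadcast error
```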
```diff
@@ -514,7 +515,13 @@ def adaptive_mutation_population_fitness(self, offspring):
         for idx in range(batch_first_index, batch_last_index):
             fitness[idx] = fitness_temp[idx - batch_first_index]

-        average_fitness = numpy.mean(fitness)
+        if len(fitness.shape) > 1:
+            # TODO This is a multi-objective optimization problem.
+            # Calculate the average of each objective's fitness across all solutions in the population.
+            average_fitness = numpy.mean(fitness, axis=0)
+        else:
+            # This is a single-objective optimization problem.
+            average_fitness = numpy.mean(fitness)

         return average_fitness, fitness[len(parents_to_keep):]
```
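
The `axis=0` distinction matters: for a fitness array of shape `(num_solutions, num_objectives)`, a plain `numpy.mean(fitness)` collapses everything into one scalar, while `axis=0` yields one average per objective. For example:

```python
import numpy

# Hypothetical fitness of 3 solutions on 2 objectives.
fitness = numpy.array([[1.0, 10.0],
                       [2.0, 20.0],
                       [3.0, 30.0]])

print(numpy.mean(fitness))          # 11.0 — a single scalar over all values
print(numpy.mean(fitness, axis=0))  # [ 2. 20.] — one average per objective
```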

```diff
@@ -690,10 +697,30 @@ def adaptive_mutation_randomly(self, offspring):
         # Adaptive random mutation changes one or more genes in each offspring randomly.
         # The number of genes to mutate depends on the solution's fitness value.
         for offspring_idx in range(offspring.shape[0]):
-            if offspring_fitness[offspring_idx] < average_fitness:
-                adaptive_mutation_num_genes = self.mutation_num_genes[0]
+            ## TODO Make edits to work with multi-objective optimization.
+            # Compare the fitness of each offspring to the average fitness of each objective function.
+            fitness_comparison = offspring_fitness[offspring_idx] < average_fitness
+            # Check if the problem is single or multi-objective optimization.
+            if type(fitness_comparison) is bool:
+                # Single-objective optimization problem.
+                if fitness_comparison:
+                    adaptive_mutation_num_genes = self.mutation_num_genes[0]
+                else:
+                    adaptive_mutation_num_genes = self.mutation_num_genes[1]
             else:
-                adaptive_mutation_num_genes = self.mutation_num_genes[1]
+                # Multi-objective optimization problem.
+
+                # Get the sum of the comparison array (True counts as 1, False as 0).
+                fitness_comparison_sum = sum(fitness_comparison)
+                # Check if at least 50% of the objectives have fitness below the average.
+                # If True, use the first value of mutation_num_genes.
+                # If False, use the second value.
+                if fitness_comparison_sum >= len(fitness_comparison)/2:
+                    adaptive_mutation_num_genes = self.mutation_num_genes[0]
+                else:
+                    adaptive_mutation_num_genes = self.mutation_num_genes[1]
+
             mutation_indices = numpy.array(random.sample(range(0, self.num_genes), adaptive_mutation_num_genes))
             for gene_idx in mutation_indices:
```
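
The multi-objective branch is effectively a majority vote: the larger mutation amount applies only when the offspring falls below the per-objective average on at least half of the objectives. A standalone sketch of that decision rule (names are illustrative):

```python
import numpy

def pick_num_genes(offspring_fitness, average_fitness, mutation_num_genes):
    """Majority vote over objectives: mutate more genes when the offspring
    is below the average on at least half of them."""
    below_average = numpy.asarray(offspring_fitness) < numpy.asarray(average_fitness)
    if below_average.sum() >= below_average.size / 2:
        return mutation_num_genes[0]  # low-quality solution: mutate more genes
    return mutation_num_genes[1]      # high-quality solution: mutate fewer genes

# Below average on 2 of 3 objectives -> the first (larger) amount is chosen.
print(pick_num_genes([1.0, 5.0, 2.0], [3.0, 4.0, 3.0], mutation_num_genes=(4, 1)))  # 4
```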

pygad/utils/parent_selection.py

Lines changed: 99 additions & 35 deletions
```diff
@@ -7,17 +7,23 @@

 class ParentSelection:
     def steady_state_selection(self, fitness, num_parents):
-
+
         """
-        Selects the parents using the steady-state selection technique. Later, these parents will mate to produce the offspring.
+        Selects the parents using the steady-state selection technique.
+        This works by sorting the solutions based on their fitness and selecting the best ones as parents.
+        Later, these parents will mate to produce the offspring.
+
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """

-        fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])
-        fitness_sorted.reverse()
+        # Return the indices of the sorted solutions (all solutions in the population).
+        # This function works with both single- and multi-objective optimization problems.
+        fitness_sorted = nsga2.sort_solutions_nsga2(fitness=fitness)

         # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.
         if self.gene_type_single == True:
```
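
Assuming `nsga2.sort_solutions_nsga2` returns the population indices ordered best-first (non-dominated sorting with crowding-distance tie-breaking), steady-state selection reduces to taking the first `num_parents` entries. A toy sketch of that pattern:

```python
import numpy

# Hypothetical best-first ordering of a population of 5 solutions.
fitness_sorted = [4, 0, 2, 1, 3]
population = numpy.arange(15, dtype=float).reshape(5, 3)  # 5 solutions, 3 genes
num_parents = 2

parents_indices = fitness_sorted[:num_parents]
parents = population[parents_indices, :].copy()
print(parents_indices)  # [4, 0] — the two best-ranked solutions
```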
```diff
@@ -38,11 +44,14 @@ def rank_selection(self, fitness, num_parents):
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """

-        # This has the index of each solution in the population.
-        fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])
+        # Return the indices of the sorted solutions (all solutions in the population).
+        # This function works with both single- and multi-objective optimization problems.
+        fitness_sorted = nsga2.sort_solutions_nsga2(fitness=fitness)

         # Rank the solutions based on their fitness. The worst is given the rank 1. The best has the rank N.
         rank = numpy.arange(1, self.sol_per_pop+1)
```
```diff
@@ -74,7 +83,9 @@ def random_selection(self, fitness, num_parents):
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """

         if self.gene_type_single == True:
```
```diff
@@ -96,35 +107,68 @@ def tournament_selection(self, fitness, num_parents):
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
-
+
+        # Return the indices of the sorted solutions (all solutions in the population).
+        # This function works with both single- and multi-objective optimization problems.
+        fitness_sorted = nsga2.sort_solutions_nsga2(fitness=fitness)
+
         if self.gene_type_single == True:
             parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])
         else:
             parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)
-
+
         parents_indices = []
-
+
         for parent_num in range(num_parents):
+            # Generate random indices for the candidate solutions.
             rand_indices = numpy.random.randint(low=0.0, high=len(fitness), size=self.K_tournament)
-            K_fitnesses = fitness[rand_indices]
-            selected_parent_idx = numpy.where(K_fitnesses == numpy.max(K_fitnesses))[0][0]
+            # K_fitnesses = fitness[rand_indices]
+            # selected_parent_idx = numpy.where(K_fitnesses == numpy.max(K_fitnesses))[0][0]
+
+            # Find the rank of the candidate solutions. The lower the rank, the better the solution.
+            rand_indices_rank = [fitness_sorted.index(rand_idx) for rand_idx in rand_indices]
+            # Select the solution with the lowest rank as a parent.
+            selected_parent_idx = rand_indices_rank.index(min(rand_indices_rank))
+
+            # Append the index of the selected parent.
             parents_indices.append(rand_indices[selected_parent_idx])
+            # Insert the selected parent.
             parents[parent_num, :] = self.population[rand_indices[selected_parent_idx], :].copy()
-
+
         return parents, numpy.array(parents_indices)
-
+
     def roulette_wheel_selection(self, fitness, num_parents):

         """
         Selects the parents using the roulette wheel selection technique. Later, these parents will mate to produce the offspring.
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
-
+
+        ## Make edits to work with multi-objective optimization.
+        ## The objective is to convert the fitness from an M-D array to a 1D array.
+        ## There are 2 ways:
+        # 1) By summing the fitness values of each solution.
+        # 2) By using only 1 objective to create the roulette wheel and excluding the others.
+
+        # Take the sum of the fitness values of each solution.
+        if len(fitness.shape) > 1:
+            # Multi-objective optimization problem.
+            # Sum the fitness values of each solution to reduce the fitness from an M-D array to a 1D array.
+            fitness = numpy.sum(fitness, axis=1)
+        else:
+            # Single-objective optimization problem.
+            pass
+
+        # Reaching this step means that fitness is a 1D array.
         fitness_sum = numpy.sum(fitness)
         if fitness_sum == 0:
             self.logger.error("Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.")
```
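
Ranking candidates by their position in the sorted order (instead of comparing raw fitness with `numpy.max`) is what lets the tournament work when fitness is a vector per solution. A standalone sketch, assuming `fitness_sorted` lists population indices best-first:

```python
import numpy

# Hypothetical best-first order: solution 3 is best, then 0, 4, 1, 2.
fitness_sorted = [3, 0, 4, 1, 2]

# A tournament among K = 3 randomly drawn candidate indices.
rand_indices = numpy.array([4, 2, 0])

# A candidate's rank is its position in the sorted order (lower is better).
ranks = [fitness_sorted.index(idx) for idx in rand_indices]  # [2, 4, 1]
winner = rand_indices[ranks.index(min(ranks))]
print(winner)  # 0 — the best-ranked of the three candidates
```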
```diff
@@ -170,7 +214,9 @@ def wheel_cumulative_probs(self, probs, num_parents):
             probs_start[min_probs_idx] = curr
             curr = curr + probs[min_probs_idx]
             probs_end[min_probs_idx] = curr
-            probs[min_probs_idx] = 99999999999
+            # Replace 99999999999 by float('inf')
+            # probs[min_probs_idx] = 99999999999
+            probs[min_probs_idx] = float('inf')

         # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.
         if self.gene_type_single == True:
```
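
Using `float('inf')` as the "already processed" sentinel is strictly safer than the old magic number, since no probability can compare greater than infinity. A toy version of the range-building loop it appears in (hypothetical, simplified from the actual method):

```python
import numpy

probs = numpy.array([0.2, 0.5, 0.3])
probs_start = numpy.zeros_like(probs)
probs_end = numpy.zeros_like(probs)

work = probs.copy()
curr = 0.0
for _ in range(len(work)):
    # Give the smallest remaining probability the next slice of the wheel.
    min_idx = int(numpy.argmin(work))
    probs_start[min_idx] = curr
    curr += probs[min_idx]
    probs_end[min_idx] = curr
    work[min_idx] = float('inf')  # mark as processed; nothing exceeds inf

print(probs_start)  # [0.  0.5 0.2]
print(probs_end)    # [0.2 1.  0.5]
```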
```diff
@@ -187,14 +233,34 @@ def stochastic_universal_selection(self, fitness, num_parents):
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """

+        ## Make edits to work with multi-objective optimization.
+        ## The objective is to convert the fitness from an M-D array to a 1D array.
+        ## There are 2 ways:
+        # 1) By summing the fitness values of each solution.
+        # 2) By using only 1 objective to create the roulette wheel and excluding the others.
+
+        # Take the sum of the fitness values of each solution.
+        if len(fitness.shape) > 1:
+            # Multi-objective optimization problem.
+            # Sum the fitness values of each solution to reduce the fitness from an M-D array to a 1D array.
+            fitness = numpy.sum(fitness, axis=1)
+        else:
+            # Single-objective optimization problem.
+            pass
+
+        # Reaching this step means that fitness is a 1D array.
         fitness_sum = numpy.sum(fitness)
         if fitness_sum == 0:
             self.logger.error("Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.")
             raise ZeroDivisionError("Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.")
+
         probs = fitness / fitness_sum
+
         probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.
         probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.
```
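
Both roulette wheel and stochastic universal selection collapse a multi-objective fitness matrix to one scalar per solution by summing across objectives, an equal-weight sum that implicitly assumes the objectives are on comparable scales. For example:

```python
import numpy

# Hypothetical fitness: 3 solutions, 2 objectives each.
fitness = numpy.array([[1.0, 4.0],
                       [2.0, 2.0],
                       [3.0, 1.0]])

if len(fitness.shape) > 1:
    # Multi-objective: one scalar per solution.
    fitness = numpy.sum(fitness, axis=1)

print(fitness)                       # [5. 4. 4.]
print(fitness / numpy.sum(fitness))  # wheel probabilities, about [0.385 0.308 0.308]
```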

```diff
@@ -206,7 +272,9 @@ def stochastic_universal_selection(self, fitness, num_parents):
             probs_start[min_probs_idx] = curr
             curr = curr + probs[min_probs_idx]
             probs_end[min_probs_idx] = curr
-            probs[min_probs_idx] = 99999999999
+            # Replace 99999999999 by float('inf')
+            # probs[min_probs_idx] = 99999999999
+            probs[min_probs_idx] = float('inf')

         pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.
         first_pointer = numpy.random.uniform(low=0.0,
```
```diff
@@ -234,8 +302,6 @@
     def tournament_selection_nsga2(self,
                                    fitness,
                                    num_parents
-                                   # pareto_fronts,
-                                   # solutions_fronts_indices,
                                    ):

         """
```
```diff
@@ -253,7 +319,9 @@ def tournament_selection_nsga2(self,
             -pareto_fronts: A nested array of all the pareto fronts. Each front has its solutions.
             -solutions_fronts_indices: A list of the pareto front index of each solution in the current population.

-        It returns an array of the selected parents alongside their indices in the population.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """

         if self.gene_type_single == True:
```
```diff
@@ -263,19 +331,15 @@ def tournament_selection_nsga2(self,

         # The indices of the selected parents.
         parents_indices = []
-
+
         # TODO If there is only a single objective, each pareto front is expected to have only 1 solution.
-        # TODO Make a test to check for that behaviour.
+        # TODO Make a test to check for that behaviour and add it to the GitHub actions tests.
         pareto_fronts, solutions_fronts_indices = nsga2.non_dominated_sorting(fitness)

         # Randomly generate pairs of indices to apply for NSGA-II tournament selection for selecting the parents solutions.
         rand_indices = numpy.random.randint(low=0.0,
                                             high=len(solutions_fronts_indices),
                                             size=(num_parents, self.K_tournament))
-        # rand_indices[0, 0] = 5
-        # rand_indices[0, 1] = 3
-        # rand_indices[1, 0] = 1
-        # rand_indices[1, 1] = 6

         for parent_num in range(num_parents):
             # Return the indices of the current 2 solutions.
```
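
The selection logic that follows compares two candidates by Pareto front index first (the lower front wins), and only falls back to other criteria within the same front; the diff below shows a random draw deciding the final tie. A hedged standalone sketch of an NSGA-II-style binary tournament, assuming crowding distance is the within-front tiebreaker:

```python
import random

def nsga2_tournament_pick(idx_a, idx_b, front_index, crowding_distance):
    """Binary NSGA-II tournament (sketch): prefer the lower (better) Pareto
    front; within the same front prefer the larger crowding distance;
    break remaining ties randomly."""
    if front_index[idx_a] != front_index[idx_b]:
        return idx_a if front_index[idx_a] < front_index[idx_b] else idx_b
    if crowding_distance[idx_a] != crowding_distance[idx_b]:
        return idx_a if crowding_distance[idx_a] > crowding_distance[idx_b] else idx_b
    return random.choice([idx_a, idx_b])

# Solution 0 sits on front 0 and solution 1 on front 1, so solution 0 wins.
front_index = {0: 0, 1: 1}
crowding_distance = {0: 0.7, 1: float('inf')}
print(nsga2_tournament_pick(0, 1, front_index, crowding_distance))  # 0
```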
```diff
@@ -346,7 +410,7 @@ def tournament_selection_nsga2(self,
             else:
                 # If the random number is >= 0.5, then select the second solution.
                 selected_parent_idx = current_indices[1]
-
+
             # Insert the selected parent index.
             parents_indices.append(selected_parent_idx)
             # Insert the selected parent.
```
```diff
@@ -358,8 +422,6 @@
     def nsga2_selection(self,
                         fitness,
                         num_parents
-                        # pareto_fronts,
-                        # solutions_fronts_indices
                         ):

         """
```
```diff
@@ -378,7 +440,9 @@ def nsga2_selection(self,
             -pareto_fronts: A nested array of all the pareto fronts. Each front has its solutions.
             -solutions_fronts_indices: A list of the pareto front index of each solution in the current population.

-        It returns an array of the selected parents alongside their indices in the population.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """

         if self.gene_type_single == True:
```

pygad/visualize/__init__.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,3 +1,3 @@
 from pygad.visualize import plot

-__version__ = "1.0.0"
+__version__ = "1.1.0"
```
