@@ -7,17 +7,23 @@
 
 class ParentSelection:
     def steady_state_selection(self, fitness, num_parents):
-
+
         """
-        Selects the parents using the steady-state selection technique. Later, these parents will mate to produce the offspring.
+        Selects the parents using the steady-state selection technique.
+        This is done by sorting the solutions based on their fitness and selecting the best ones as parents.
+        Later, these parents will mate to produce the offspring.
+
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
 
-        fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])
-        fitness_sorted.reverse()
+        # Return the indices of the sorted solutions (all solutions in the population).
+        # This function works with both single- and multi-objective optimization problems.
+        fitness_sorted = nsga2.sort_solutions_nsga2(fitness=fitness)
 
         # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.
         if self.gene_type_single == True:
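For context, the sketch below shows roughly what steady-state selection does with the sorted indices. It assumes that `nsga2.sort_solutions_nsga2` returns the population's solution indices ordered best-first; the `sort_best_first` helper is a hypothetical stand-in for it, not the library function.

```python
import numpy

def sort_best_first(fitness):
    # Stand-in for nsga2.sort_solutions_nsga2: for a 1D (single-objective)
    # fitness array, sort the indices by descending fitness.
    return list(numpy.argsort(fitness)[::-1])

def steady_state_pick(population, fitness, num_parents):
    # Keep the best num_parents solutions as parents.
    fitness_sorted = sort_best_first(fitness)
    parents_indices = fitness_sorted[:num_parents]
    return population[parents_indices], numpy.array(parents_indices)

population = numpy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
fitness = numpy.array([0.2, 0.9, 0.4, 0.7])
parents, idx = steady_state_pick(population, fitness, num_parents=2)
print(idx)  # [1 3] -> the two fittest solutions
```

Because the indices come back best-first, taking the first `num_parents` entries is the whole selection step.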
@@ -38,11 +44,14 @@ def rank_selection(self, fitness, num_parents):
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
 
-        # This has the index of each solution in the population.
-        fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])
+        # Return the indices of the sorted solutions (all solutions in the population).
+        # This function works with both single- and multi-objective optimization problems.
+        fitness_sorted = nsga2.sort_solutions_nsga2(fitness=fitness)
 
         # Rank the solutions based on their fitness. The worst is given rank 1 and the best is given rank N.
         rank = numpy.arange(1, self.sol_per_pop+1)
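The rank weighting that `rank_selection` builds on can be seen in isolation in the sketch below (an illustration, not the method's actual body, assuming a 1D fitness array): the worst solution gets rank 1 and the best gets rank N, so selection weight follows rank order rather than raw fitness magnitude.

```python
import numpy

fitness = numpy.array([0.01, 5.0, 1000.0, 2.5])  # wildly different scales
order = numpy.argsort(fitness)                   # indices from worst to best
rank = numpy.empty(len(fitness), dtype=int)
rank[order] = numpy.arange(1, len(fitness) + 1)  # worst gets 1, best gets N

probs = rank / rank.sum()
print(probs)  # [0.1 0.3 0.4 0.2] -> proportional to rank, not magnitude
```

This keeps selection pressure stable even when one solution's raw fitness dwarfs the others.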
@@ -74,7 +83,9 @@ def random_selection(self, fitness, num_parents):
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
 
         if self.gene_type_single == True:
@@ -96,35 +107,68 @@ def tournament_selection(self, fitness, num_parents):
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
-
+
+        # Return the indices of the sorted solutions (all solutions in the population).
+        # This function works with both single- and multi-objective optimization problems.
+        fitness_sorted = nsga2.sort_solutions_nsga2(fitness=fitness)
+
         if self.gene_type_single == True:
             parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])
         else:
             parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)
-
+
         parents_indices = []
-
+
         for parent_num in range(num_parents):
+            # Generate random indices for the candidate solutions.
             rand_indices = numpy.random.randint(low=0.0, high=len(fitness), size=self.K_tournament)
-            K_fitnesses = fitness[rand_indices]
-            selected_parent_idx = numpy.where(K_fitnesses == numpy.max(K_fitnesses))[0][0]
+            # K_fitnesses = fitness[rand_indices]
+            # selected_parent_idx = numpy.where(K_fitnesses == numpy.max(K_fitnesses))[0][0]
+
+            # Find the rank of the candidate solutions. The lower the rank, the better the solution.
+            rand_indices_rank = [fitness_sorted.index(rand_idx) for rand_idx in rand_indices]
+            # Select the solution with the lowest rank as a parent.
+            selected_parent_idx = rand_indices_rank.index(min(rand_indices_rank))
+
+            # Append the index of the selected parent.
             parents_indices.append(rand_indices[selected_parent_idx])
+            # Insert the selected parent.
             parents[parent_num, :] = self.population[rand_indices[selected_parent_idx], :].copy()
-
+
         return parents, numpy.array(parents_indices)
-
+
     def roulette_wheel_selection(self, fitness, num_parents):
 
         """
         Selects the parents using the roulette wheel selection technique. Later, these parents will mate to produce the offspring.
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
-
+
+        ## Make edits to work with multi-objective optimization.
+        ## The objective is to convert the fitness from an M-D array to a 1D array.
+        ## There are 2 ways:
+        # 1) By summing the fitness values of each solution.
+        # 2) By using only 1 objective to create the roulette wheel and excluding the others.
+
+        # Take the sum of the fitness values of each solution.
+        if len(fitness.shape) > 1:
+            # Multi-objective optimization problem.
+            # Sum the fitness values of each solution to reduce the fitness from an M-D array to a 1D array.
+            fitness = numpy.sum(fitness, axis=1)
+        else:
+            # Single-objective optimization problem.
+            pass
+
+        # Reaching this step ensures that fitness is a 1D array.
         fitness_sum = numpy.sum(fitness)
         if fitness_sum == 0:
             self.logger.error("Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.")
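The new multi-objective handling reduces the fitness array before the wheel is built. Below is a self-contained sketch of that reduction followed by a single wheel spin, assuming non-negative fitness values that are being maximized; the `searchsorted` spin is illustrative, not the module's code.

```python
import numpy

# Two objectives per solution (multi-objective case).
fitness = numpy.array([[1.0, 2.0],
                       [3.0, 1.0],
                       [2.0, 2.0]])

if len(fitness.shape) > 1:
    # Collapse the M-D fitness to 1D by summing each solution's objectives.
    fitness = numpy.sum(fitness, axis=1)   # -> [3. 4. 4.]

probs = fitness / numpy.sum(fitness)       # -> approx. [0.27 0.36 0.36]
spin = numpy.random.rand()
selected = numpy.searchsorted(numpy.cumsum(probs), spin)
print(selected)  # index of the solution whose wheel slice contains the spin
```

Summing is the simpler of the two options listed in the comments; using a single objective instead would just index one column rather than summing.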
@@ -170,7 +214,9 @@ def wheel_cumulative_probs(self, probs, num_parents):
             probs_start[min_probs_idx] = curr
             curr = curr + probs[min_probs_idx]
             probs_end[min_probs_idx] = curr
-            probs[min_probs_idx] = 99999999999
+            # Replace 99999999999 by float('inf')
+            # probs[min_probs_idx] = 99999999999
+            probs[min_probs_idx] = float('inf')
 
         # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.
         if self.gene_type_single == True:
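The switch to `float('inf')` matters because of the masking pattern used while building the probability ranges: each iteration takes the current minimum and must then exclude that entry from later iterations. A sketch of the pattern, assuming all real probabilities are finite:

```python
import numpy

probs = numpy.array([0.1, 0.4, 0.2, 0.3])
curr = 0.0
order = []
for _ in range(len(probs)):
    min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]
    order.append(min_probs_idx)
    curr += probs[min_probs_idx]
    # float('inf') can never collide with a real probability,
    # unlike a large-but-finite magic number.
    probs[min_probs_idx] = float('inf')

print(order)  # [0, 2, 3, 1] -> indices visited from smallest to largest prob
```

A finite sentinel can, in pathological cases, collide with a legitimate value; infinity cannot.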
@@ -187,14 +233,34 @@ def stochastic_universal_selection(self, fitness, num_parents):
         It accepts 2 parameters:
             -fitness: The fitness values of the solutions in the current population.
             -num_parents: The number of parents to be selected.
-        It returns an array of the selected parents.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
 
+        ## Make edits to work with multi-objective optimization.
+        ## The objective is to convert the fitness from an M-D array to a 1D array.
+        ## There are 2 ways:
+        # 1) By summing the fitness values of each solution.
+        # 2) By using only 1 objective to create the roulette wheel and excluding the others.
+
+        # Take the sum of the fitness values of each solution.
+        if len(fitness.shape) > 1:
+            # Multi-objective optimization problem.
+            # Sum the fitness values of each solution to reduce the fitness from an M-D array to a 1D array.
+            fitness = numpy.sum(fitness, axis=1)
+        else:
+            # Single-objective optimization problem.
+            pass
+
+        # Reaching this step ensures that fitness is a 1D array.
         fitness_sum = numpy.sum(fitness)
         if fitness_sum == 0:
             self.logger.error("Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.")
             raise ZeroDivisionError("Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.")
+
         probs = fitness / fitness_sum
+
         probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.
         probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.
 
@@ -206,7 +272,9 @@ def stochastic_universal_selection(self, fitness, num_parents):
             probs_start[min_probs_idx] = curr
             curr = curr + probs[min_probs_idx]
             probs_end[min_probs_idx] = curr
-            probs[min_probs_idx] = 99999999999
+            # Replace 99999999999 by float('inf')
+            # probs[min_probs_idx] = 99999999999
+            probs[min_probs_idx] = float('inf')
 
         pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.
         first_pointer = numpy.random.uniform(low=0.0,
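For reference, here is a minimal sketch of how stochastic universal selection derives all of its pointers from a single random draw (probabilities are assumed normalized; the variable names mirror the snippet, but the example is standalone):

```python
import numpy

num_parents = 4
pointers_distance = 1.0 / num_parents          # spacing between pointers
first_pointer = numpy.random.uniform(low=0.0, high=pointers_distance)

# One spin yields all pointers at equal distances around the wheel.
pointers = first_pointer + pointers_distance * numpy.arange(num_parents)

probs = numpy.array([0.1, 0.4, 0.2, 0.3])
chosen = numpy.searchsorted(numpy.cumsum(probs), pointers)
print(chosen)  # each pointer lands in one solution's probability range
```

Spacing all pointers from one draw reduces selection variance compared with spinning the wheel independently for each parent.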
@@ -234,8 +302,6 @@ def stochastic_universal_selection(self, fitness, num_parents):
     def tournament_selection_nsga2(self,
                                    fitness,
                                    num_parents
-                                   # pareto_fronts,
-                                   # solutions_fronts_indices,
                                    ):
 
         """
@@ -253,7 +319,9 @@ def tournament_selection_nsga2(self,
             -pareto_fronts: A nested array of all the pareto fronts. Each front has its solutions.
             -solutions_fronts_indices: A list of the pareto front index of each solution in the current population.
 
-        It returns an array of the selected parents alongside their indices in the population.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
 
         if self.gene_type_single == True:
@@ -263,19 +331,15 @@ def tournament_selection_nsga2(self,
 
         # The indices of the selected parents.
         parents_indices = []
-
+
         # TODO If there is only a single objective, each pareto front is expected to have only 1 solution.
-        # TODO Make a test to check for that behaviour.
+        # TODO Make a test to check for that behaviour and add it to the GitHub Actions tests.
         pareto_fronts, solutions_fronts_indices = nsga2.non_dominated_sorting(fitness)
 
         # Randomly generate pairs of indices to apply for NSGA-II tournament selection for selecting the parent solutions.
         rand_indices = numpy.random.randint(low=0.0,
                                             high=len(solutions_fronts_indices),
                                             size=(num_parents, self.K_tournament))
-        # rand_indices[0, 0] = 5
-        # rand_indices[0, 1] = 3
-        # rand_indices[1, 0] = 1
-        # rand_indices[1, 1] = 6
 
 
         for parent_num in range(num_parents):
             # Return the indices of the current 2 solutions.
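Each tournament here compares one random pair of solutions by pareto front. Below is a standalone sketch of that comparison, assuming a lower front index is better and that ties within a front are broken randomly, as the 0.5 branch in the next hunk does; crowding distance is the other common tie-breaker.

```python
import numpy

def nsga2_tournament(idx_a, idx_b, solutions_fronts_indices):
    # Prefer the solution on the better (lower-index) pareto front.
    front_a = solutions_fronts_indices[idx_a]
    front_b = solutions_fronts_indices[idx_b]
    if front_a < front_b:
        return idx_a
    if front_b < front_a:
        return idx_b
    # Same front: break the tie randomly (a crowding-distance
    # comparison is the other common choice).
    return idx_a if numpy.random.rand() < 0.5 else idx_b

solutions_fronts_indices = numpy.array([0, 2, 1, 0, 1])
print(nsga2_tournament(1, 3, solutions_fronts_indices))  # 3 (front 0 beats front 2)
```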
@@ -346,7 +410,7 @@ def tournament_selection_nsga2(self,
             else:
                 # If the random number is >= 0.5, then select the second solution.
                 selected_parent_idx = current_indices[1]
-
+
             # Insert the selected parent index.
             parents_indices.append(selected_parent_idx)
             # Insert the selected parent.
@@ -358,8 +422,6 @@ def nsga2_selection(self,
     def nsga2_selection(self,
                         fitness,
                         num_parents
-                        # pareto_fronts,
-                        # solutions_fronts_indices
                         ):
 
         """
@@ -378,7 +440,9 @@ def nsga2_selection(self,
             -pareto_fronts: A nested array of all the pareto fronts. Each front has its solutions.
             -solutions_fronts_indices: A list of the pareto front index of each solution in the current population.
 
-        It returns an array of the selected parents alongside their indices in the population.
+        It returns:
+            -An array of the selected parents.
+            -The indices of the selected solutions.
         """
 
         if self.gene_type_single == True:
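The body of `nsga2_selection` is not shown in this diff. As a rough guide only, NSGA-II parent selection conventionally fills the parent set front by front, best front first; the sketch below illustrates that convention and is an assumption about intent, not the file's code.

```python
import numpy

def nsga2_pick(pareto_fronts, num_parents):
    # Take whole fronts, best first, until num_parents indices are collected.
    parents_indices = []
    for front in pareto_fronts:
        remaining = num_parents - len(parents_indices)
        if remaining <= 0:
            break
        # If the front does not fit, a crowding-distance sort would
        # normally decide which of its members to keep.
        parents_indices.extend(list(front)[:remaining])
    return numpy.array(parents_indices)

pareto_fronts = [[2, 4], [0, 3], [1]]
print(nsga2_pick(pareto_fronts, num_parents=3))  # [2 4 0]
```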