@@ -1145,34 +1145,81 @@ static void change_pageblock_range(struct page *pageblock_page,
  * as fragmentation caused by those allocations polluting movable pageblocks
  * is worse than movable allocations stealing from unmovable and reclaimable
  * pageblocks.
- *
- * If we claim more than half of the pageblock, change pageblock's migratetype
- * as well.
  */
-static void try_to_steal_freepages(struct zone *zone, struct page *page,
-                                  int start_type, int fallback_type)
+static bool can_steal_fallback(unsigned int order, int start_mt)
+{
+        /*
+         * Leaving this order check is intended, although there is
+         * relaxed order check in next check. The reason is that
+         * we can actually steal whole pageblock if this condition met,
+         * but, below check doesn't guarantee it and that is just heuristic
+         * so could be changed anytime.
+         */
+        if (order >= pageblock_order)
+                return true;
+
+        if (order >= pageblock_order / 2 ||
+            start_mt == MIGRATE_RECLAIMABLE ||
+            start_mt == MIGRATE_UNMOVABLE ||
+            page_group_by_mobility_disabled)
+                return true;
+
+        return false;
+}
+
+/*
+ * This function implements actual steal behaviour. If order is large enough,
+ * we can steal whole pageblock. If not, we first move freepages in this
+ * pageblock and check whether half of pages are moved or not. If half of
+ * pages are moved, we can change migratetype of pageblock and permanently
+ * use its pages as requested migratetype in the future.
+ */
+static void steal_suitable_fallback(struct zone *zone, struct page *page,
+                                    int start_type)
 {
         int current_order = page_order(page);
+        int pages;

         /* Take ownership for orders >= pageblock_order */
         if (current_order >= pageblock_order) {
                 change_pageblock_range(page, current_order, start_type);
                 return;
         }

-        if (current_order >= pageblock_order / 2 ||
-            start_type == MIGRATE_RECLAIMABLE ||
-            start_type == MIGRATE_UNMOVABLE ||
-            page_group_by_mobility_disabled) {
-                int pages;
+        pages = move_freepages_block(zone, page, start_type);
+
+        /* Claim the whole block if over half of it is free */
+        if (pages >= (1 << (pageblock_order - 1)) ||
+                        page_group_by_mobility_disabled)
+                set_pageblock_migratetype(page, start_type);
+}
+
+/* Check whether there is a suitable fallback freepage with requested order. */
+static int find_suitable_fallback(struct free_area *area, unsigned int order,
+                                        int migratetype, bool *can_steal)
+{
+        int i;
+        int fallback_mt;
+
+        if (area->nr_free == 0)
+                return -1;
+
+        *can_steal = false;
+        for (i = 0;; i++) {
+                fallback_mt = fallbacks[migratetype][i];
+                if (fallback_mt == MIGRATE_RESERVE)
+                        break;
+
+                if (list_empty(&area->free_list[fallback_mt]))
+                        continue;

-                pages = move_freepages_block(zone, page, start_type);
+                if (can_steal_fallback(order, migratetype))
+                        *can_steal = true;

-                /* Claim the whole block if over half of it is free */
-                if (pages >= (1 << (pageblock_order - 1)) ||
-                                page_group_by_mobility_disabled)
-                        set_pageblock_migratetype(page, start_type);
+                return fallback_mt;
         }
+
+        return -1;
 }

 /* Remove an element from the buddy allocator from the fallback list */
@@ -1182,53 +1229,45 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
         struct free_area *area;
         unsigned int current_order;
         struct page *page;
+        int fallback_mt;
+        bool can_steal;

         /* Find the largest possible block of pages in the other list */
         for (current_order = MAX_ORDER - 1;
                                 current_order >= order && current_order <= MAX_ORDER - 1;
                                 --current_order) {
-                int i;
-                for (i = 0;; i++) {
-                        int migratetype = fallbacks[start_migratetype][i];
-                        int buddy_type = start_migratetype;
-
-                        /* MIGRATE_RESERVE handled later if necessary */
-                        if (migratetype == MIGRATE_RESERVE)
-                                break;
-
-                        area = &(zone->free_area[current_order]);
-                        if (list_empty(&area->free_list[migratetype]))
-                                continue;
-
-                        page = list_entry(area->free_list[migratetype].next,
-                                        struct page, lru);
-                        area->nr_free--;
-
-                        try_to_steal_freepages(zone, page, start_migratetype,
-                                                migratetype);
+                area = &(zone->free_area[current_order]);
+                fallback_mt = find_suitable_fallback(area, current_order,
+                                start_migratetype, &can_steal);
+                if (fallback_mt == -1)
+                        continue;

-                        /* Remove the page from the freelists */
-                        list_del(&page->lru);
-                        rmv_page_order(page);
+                page = list_entry(area->free_list[fallback_mt].next,
+                                                struct page, lru);
+                if (can_steal)
+                        steal_suitable_fallback(zone, page, start_migratetype);

-                        expand(zone, page, order, current_order, area,
-                                        buddy_type);
+                /* Remove the page from the freelists */
+                area->nr_free--;
+                list_del(&page->lru);
+                rmv_page_order(page);

-                        /*
-                         * The freepage_migratetype may differ from pageblock's
-                         * migratetype depending on the decisions in
-                         * try_to_steal_freepages(). This is OK as long as it
-                         * does not differ for MIGRATE_CMA pageblocks. For CMA
-                         * we need to make sure unallocated pages flushed from
-                         * pcp lists are returned to the correct freelist.
-                         */
-                        set_freepage_migratetype(page, buddy_type);
+                expand(zone, page, order, current_order, area,
+                                        start_migratetype);
+                /*
+                 * The freepage_migratetype may differ from pageblock's
+                 * migratetype depending on the decisions in
+                 * steal_suitable_fallback(). This is OK as long as it
+                 * does not differ for MIGRATE_CMA pageblocks. For CMA
+                 * we need to make sure unallocated pages flushed from
+                 * pcp lists are returned to the correct freelist.
+                 */
+                set_freepage_migratetype(page, start_migratetype);

-                        trace_mm_page_alloc_extfrag(page, order, current_order,
-                                        start_migratetype, migratetype);
+                trace_mm_page_alloc_extfrag(page, order, current_order,
+                                        start_migratetype, fallback_mt);

-                        return page;
-                }
+                return page;
         }

         return NULL;
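
For readers who want to poke at the two heuristics in isolation, below is a small self-contained userspace sketch of the decision logic the new helpers encode: when a fallback page is worth stealing (can_steal_fallback()) and when enough of a pageblock has been moved to claim the whole block (the test in steal_suitable_fallback()). PAGEBLOCK_ORDER, the migratetype values and page_group_by_mobility_disabled here are simplified stand-ins rather than the kernel's definitions; 9 is only a typical pageblock order for 4K-page systems with 2MB pageblocks.

/*
 * steal_model.c - simplified model of the heuristics above. The constants
 * and enum are assumptions for illustration, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER 9       /* assumed: typical for 4K pages / 2MB pageblocks */

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

static bool page_group_by_mobility_disabled;    /* false on most systems */

/* Mirrors can_steal_fallback(): is stealing from a fallback type worthwhile? */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
        /* A request this large takes the whole pageblock anyway. */
        if (order >= PAGEBLOCK_ORDER)
                return true;

        /*
         * Relaxed check: large-ish requests, or request types whose spread
         * would pollute movable pageblocks, are allowed to steal.
         */
        return order >= PAGEBLOCK_ORDER / 2 ||
               start_mt == MIGRATE_RECLAIMABLE ||
               start_mt == MIGRATE_UNMOVABLE ||
               page_group_by_mobility_disabled;
}

/*
 * Mirrors the "claim the whole block if over half of it is free" test
 * in steal_suitable_fallback().
 */
static bool should_claim_pageblock(int pages_moved)
{
        return pages_moved >= (1 << (PAGEBLOCK_ORDER - 1)) ||
               page_group_by_mobility_disabled;
}

int main(void)
{
        printf("order 5 UNMOVABLE steals:   %d\n", can_steal_fallback(5, MIGRATE_UNMOVABLE));
        printf("order 3 MOVABLE steals:     %d\n", can_steal_fallback(3, MIGRATE_MOVABLE));
        printf("300/512 pages moved claims: %d\n", should_claim_pageblock(300));
        return 0;
}

Compile with any C compiler (for example, gcc -o steal_model steal_model.c) and vary the inputs to see where the thresholds flip; the kernel code drives the same decisions from find_suitable_fallback() and __rmqueue_fallback() as shown in the diff above.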