@@ -74,6 +74,22 @@ void __init hibernate_image_size_init(void)
  */
 struct pbe *restore_pblist;
 
+/* struct linked_page is used to build chains of pages */
+
+#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
+
+struct linked_page {
+        struct linked_page *next;
+        char data[LINKED_PAGE_DATA_SIZE];
+} __packed;
+
+/*
+ * List of "safe" pages (ie. pages that were not used by the image kernel
+ * before hibernation) that may be used as temporary storage for image kernel
+ * memory contents.
+ */
+static struct linked_page *safe_pages_list;
+
 /* Pointer to an auxiliary buffer (1 page) */
 static void *buffer;
 
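Note (illustration, not part of the patch): struct linked_page overlays an entire page, so each node carries a next pointer plus LINKED_PAGE_DATA_SIZE bytes of payload, and safe_pages_list is simply a singly linked stack of whole pages. A minimal sketch of the push step that prepare_image() performs further down, using a hypothetical helper name:

/* Illustration only: make a whole page the new head of a linked_page stack. */
static void push_linked_page(struct linked_page **list, void *page_addr)
{
        struct linked_page *lp = page_addr;     /* the page itself is the node */

        lp->next = *list;
        *list = lp;
}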
@@ -113,9 +129,21 @@ static void *get_image_page(gfp_t gfp_mask, int safe_needed)
         return res;
 }
 
+static void *__get_safe_page(gfp_t gfp_mask)
+{
+        if (safe_pages_list) {
+                void *ret = safe_pages_list;
+
+                safe_pages_list = safe_pages_list->next;
+                memset(ret, 0, PAGE_SIZE);
+                return ret;
+        }
+        return get_image_page(gfp_mask, PG_SAFE);
+}
+
 unsigned long get_safe_page(gfp_t gfp_mask)
 {
-        return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
+        return (unsigned long)__get_safe_page(gfp_mask);
 }
 
 static struct page *alloc_image_page(gfp_t gfp_mask)
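Note (illustration, not part of the patch): __get_safe_page() first pops a recycled page off safe_pages_list and only falls back to get_image_page(gfp_mask, PG_SAFE) when the list is empty; either way the page handed out is zeroed. A hypothetical call site is unaffected by the change:

/* Illustration only: a hypothetical caller of get_safe_page(). */
static int example_get_safe(void **out)
{
        void *p = (void *)get_safe_page(GFP_ATOMIC);

        if (!p)
                return -ENOMEM;         /* no recycled page and allocation failed */
        *out = p;                       /* already zeroed before being handed out */
        return 0;
}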
@@ -150,15 +178,6 @@ static inline void free_image_page(void *addr, int clear_nosave_free)
         __free_page(page);
 }
 
-/* struct linked_page is used to build chains of pages */
-
-#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
-
-struct linked_page {
-        struct linked_page *next;
-        char data[LINKED_PAGE_DATA_SIZE];
-} __packed;
-
 static inline void
 free_list_of_pages(struct linked_page *list, int clear_page_nosave)
 {
@@ -208,7 +227,8 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
         if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                 struct linked_page *lp;
 
-                lp = get_image_page(ca->gfp_mask, ca->safe_needed);
+                lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
+                                        get_image_page(ca->gfp_mask, PG_ANY);
                 if (!lp)
                         return NULL;
 
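Note (illustration, not part of the patch): chain_alloc() hands out small sub-allocations from the data[] area of its current linked_page and only fetches a new backing page when the remainder is too small for the request; the hunk above changes where that backing page comes from (recycled via __get_safe_page() when ca->safe_needed is set, otherwise a plain PG_ANY page). A simplified sketch of the carving step, assuming the chain_allocator layout used elsewhere in this file (a chain head plus a used_space offset):

/* Illustration only: carve 'size' bytes from the current linked_page;
 * the new-backing-page path shown in the hunk above is omitted. */
static void *chain_carve(struct chain_allocator *ca, unsigned int size)
{
        void *ret = ca->chain->data + ca->used_space;

        ca->used_space += size;
        return ret;
}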
@@ -2104,11 +2124,6 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
         return 0;
 }
 
-/* List of "safe" pages that may be used to store data loaded from the suspend
- * image
- */
-static struct linked_page *safe_pages_list;
-
 #ifdef CONFIG_HIGHMEM
 /* struct highmem_pbe is used for creating the list of highmem pages that
  * should be restored atomically during the resume from disk, because the page
@@ -2334,7 +2349,7 @@ static int
 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 {
         unsigned int nr_pages, nr_highmem;
-        struct linked_page *sp_list, *lp;
+        struct linked_page *lp;
         int error;
 
         /* If there is no highmem, the buffer will not be necessary */
@@ -2362,9 +2377,9 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
          * NOTE: This way we make sure there will be enough safe pages for the
          * chain_alloc() in get_buffer(). It is a bit wasteful, but
          * nr_copy_pages cannot be greater than 50% of the memory anyway.
+         *
+         * nr_copy_pages cannot be less than allocated_unsafe_pages too.
          */
-        sp_list = NULL;
-        /* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
         nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
         nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
         while (nr_pages > 0) {
@@ -2373,12 +2388,11 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
                         error = -ENOMEM;
                         goto Free;
                 }
-                lp->next = sp_list;
-                sp_list = lp;
+                lp->next = safe_pages_list;
+                safe_pages_list = lp;
                 nr_pages--;
         }
         /* Preallocate memory for the image */
-        safe_pages_list = NULL;
         nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
         while (nr_pages > 0) {
                 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
@@ -2396,12 +2410,6 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
                 swsusp_set_page_free(virt_to_page(lp));
                 nr_pages--;
         }
-        /* Free the reserved safe pages so that chain_alloc() can use them */
-        while (sp_list) {
-                lp = sp_list->next;
-                free_image_page(sp_list, PG_UNSAFE_CLEAR);
-                sp_list = lp;
-        }
         return 0;
 
  Free:
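Note (illustration, not part of the patch): prepare_image() now threads the reserved PBE pages directly onto safe_pages_list instead of parking them on a local sp_list and freeing them at the end, so chain_alloc() and get_buffer() can recycle them through __get_safe_page(). A rough worked example of how many pages that is, under assumed sizes (4 KiB pages, 64-bit pointers, struct pbe being three pointers) and made-up page counts:

/*
 * Illustration only, with assumed values:
 *   LINKED_PAGE_DATA_SIZE = 4096 - 8  = 4088 bytes
 *   PBES_PER_LINKED_PAGE  = 4088 / 24 = 170 PBEs per page
 * so for, say, nr_copy_pages = 100000, nr_highmem = 0 and
 * allocated_unsafe_pages = 2000:
 *   nr_pages = DIV_ROUND_UP(100000 - 0 - 2000, 170) = 577
 * pages end up on safe_pages_list for later recycling.
 */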
@@ -2491,6 +2499,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
                 if (error)
                         return error;
 
+                safe_pages_list = NULL;
+
                 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
                 if (error)
                         return error;