Skip to content

Commit 254a1a2

Browse files
committed
Merge branch 'netdev-page_frag_alloc-fixes'
Alexander Duyck says: ==================== Address recent issues found in netdev page_frag_alloc usage This patch set addresses a couple of issues that I had pointed out to Jann Horn in response to a recent patch submission. The first issue is that I wanted to avoid the need to read/modify/write the size value in order to generate the value for pagecnt_bias. Instead we can just use a fixed constant, which reduces the need for memory read operations and the overall number of instructions to update the pagecnt_bias values. The other, and more important, issue is that we were apparently letting tun access the napi_alloc_cache indirectly through netdev_alloc_frag and, as a result, letting it create unaligned accesses via unaligned allocations. In order to prevent this I have added a call to SKB_DATA_ALIGN for the fragsz field so that we will keep the offset in the napi_alloc_cache SMP_CACHE_BYTES aligned. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents e09c6a4 + 3bed3cc commit 254a1a2

File tree

2 files changed

+8
-4
lines changed

2 files changed

+8
-4
lines changed

mm/page_alloc.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4675,11 +4675,11 @@ void *page_frag_alloc(struct page_frag_cache *nc,
46754675
/* Even if we own the page, we do not use atomic_set().
46764676
* This would break get_page_unless_zero() users.
46774677
*/
4678-
page_ref_add(page, size);
4678+
page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
46794679

46804680
/* reset page count bias and offset to start of new frag */
46814681
nc->pfmemalloc = page_is_pfmemalloc(page);
4682-
nc->pagecnt_bias = size + 1;
4682+
nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
46834683
nc->offset = size;
46844684
}
46854685

@@ -4695,10 +4695,10 @@ void *page_frag_alloc(struct page_frag_cache *nc,
46954695
size = nc->size;
46964696
#endif
46974697
/* OK, page count is 0, we can safely set it */
4698-
set_page_count(page, size + 1);
4698+
set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
46994699

47004700
/* reset page count bias and offset to start of new frag */
4701-
nc->pagecnt_bias = size + 1;
4701+
nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
47024702
offset = size - fragsz;
47034703
}
47044704

net/core/skbuff.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
356356
*/
357357
void *netdev_alloc_frag(unsigned int fragsz)
358358
{
359+
fragsz = SKB_DATA_ALIGN(fragsz);
360+
359361
return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
360362
}
361363
EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
369371

370372
void *napi_alloc_frag(unsigned int fragsz)
371373
{
374+
fragsz = SKB_DATA_ALIGN(fragsz);
375+
372376
return __napi_alloc_frag(fragsz, GFP_ATOMIC);
373377
}
374378
EXPORT_SYMBOL(napi_alloc_frag);

0 commit comments

Comments
 (0)