Commit 5875e59

mm/slub: simplify __cmpxchg_double_slab() and slab_[un]lock()
The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab() (through slab_[un]lock()) is unnecessary as bit_spin_lock() disables preemption and that's sufficient on PREEMPT_RT where no allocation/free operation is performed in hardirq context and so can't interrupt the current operation.

That means we no longer need the slab_[un]lock() wrappers, so delete them and rename the current __slab_[un]lock() to slab_[un]lock().

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
1 parent 4ef3f5a commit 5875e59
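
For reference on the claim that bit_spin_lock() disables preemption: the helper takes preemption off before spinning on the flag bit, which is what makes the extra PREEMPT_RT irq disabling redundant here. A condensed sketch, paraphrased from include/linux/bit_spinlock.h rather than quoted verbatim:

static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/* Preemption is disabled before the lock bit is taken. */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* Contended case: briefly re-enable preemption while busy-waiting. */
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
	__acquire(bitlock);
}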

File tree

1 file changed: +12 −27 lines changed

mm/slub.c

Lines changed: 12 additions & 27 deletions
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -446,40 +446,28 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void __slab_lock(struct slab *slab)
+static __always_inline void slab_lock(struct slab *slab)
 {
 	struct page *page = slab_page(slab);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
-static __always_inline void __slab_unlock(struct slab *slab)
+static __always_inline void slab_unlock(struct slab *slab)
 {
 	struct page *page = slab_page(slab);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
-static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
-{
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_irq_save(*flags);
-	__slab_lock(slab);
-}
-
-static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
-{
-	__slab_unlock(slab);
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_irq_restore(*flags);
-}
-
 /*
  * Interrupts must be disabled (for the fallback code to work right), typically
- * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
- * so we disable interrupts as part of slab_[un]lock().
+ * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
+ * part of bit_spin_lock(), is sufficient because the policy is not to allow any
+ * allocation/ free operation in hardirq context. Therefore nothing can
+ * interrupt the operation.
  */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
 		void *freelist_old, unsigned long counters_old,
@@ -498,18 +486,15 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
 	} else
 #endif
 	{
-		/* init to 0 to prevent spurious warnings */
-		unsigned long flags = 0;
-
-		slab_lock(slab, &flags);
+		slab_lock(slab);
 		if (slab->freelist == freelist_old &&
 					slab->counters == counters_old) {
 			slab->freelist = freelist_new;
 			slab->counters = counters_new;
-			slab_unlock(slab, &flags);
+			slab_unlock(slab);
 			return true;
 		}
-		slab_unlock(slab, &flags);
+		slab_unlock(slab);
 	}
 
 	cpu_relax();
@@ -540,16 +525,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__slab_lock(slab);
+	slab_lock(slab);
 	if (slab->freelist == freelist_old &&
 				slab->counters == counters_old) {
 		slab->freelist = freelist_new;
 		slab->counters = counters_new;
-		__slab_unlock(slab);
+		slab_unlock(slab);
 		local_irq_restore(flags);
 		return true;
 	}
-	__slab_unlock(slab);
+	slab_unlock(slab);
 	local_irq_restore(flags);
 }
 
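Putting the hunks together, the two fallback paths end up looking roughly like this after the change (fragments excerpted and condensed from the diff above, not the complete mm/slub.c functions):

	/*
	 * __cmpxchg_double_slab() fallback: the caller already runs with
	 * interrupts disabled, or on PREEMPT_RT where the preempt_disable()
	 * inside bit_spin_lock() suffices, so only the bit spinlock is taken.
	 */
	slab_lock(slab);
	if (slab->freelist == freelist_old &&
	    slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		slab_unlock(slab);
		return true;
	}
	slab_unlock(slab);

	/*
	 * cmpxchg_double_slab() fallback: may be reached with interrupts
	 * enabled, so it still brackets the same sequence with
	 * local_irq_save()/local_irq_restore().
	 */
	local_irq_save(flags);
	slab_lock(slab);
	if (slab->freelist == freelist_old &&
	    slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		slab_unlock(slab);
		local_irq_restore(flags);
		return true;
	}
	slab_unlock(slab);
	local_irq_restore(flags);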