Skip to content

Commit 89cbc76

Browse files
Christoph Lameter authored and htejun (Tejun Heo) committed
x86: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of them is address calculation via the form &__get_cpu_var(x). This calculates the address for the instance of the percpu variable of the current processor based on an offset. Other use cases are for storing and retrieving data from the current processors percpu area. __get_cpu_var() can be used as an lvalue when writing data or on the right side of an assignment. __get_cpu_var() is defined as : #define __get_cpu_var(var) (*this_cpu_ptr(&(var))) __get_cpu_var() always only does an address determination. However, store and retrieve operations could use a segment prefix (or global register on other platforms) to avoid the address calculation. this_cpu_write() and this_cpu_read() can directly take an offset into a percpu area and use optimized assembly code to read and write per cpu variables. This patch converts __get_cpu_var into either an explicit address calculation using this_cpu_ptr() or into a use of this_cpu operations that use the offset. Thereby address calculations are avoided and less registers are used when code is generated. Transformations done to __get_cpu_var() 1. Determine the address of the percpu instance of the current processor. DEFINE_PER_CPU(int, y); int *x = &__get_cpu_var(y); Converts to int *x = this_cpu_ptr(&y); 2. Same as #1 but this time an array structure is involved. DEFINE_PER_CPU(int, y[20]); int *x = __get_cpu_var(y); Converts to int *x = this_cpu_ptr(y); 3. Retrieve the content of the current processors instance of a per cpu variable. DEFINE_PER_CPU(int, y); int x = __get_cpu_var(y) Converts to int x = __this_cpu_read(y); 4. Retrieve the content of a percpu struct DEFINE_PER_CPU(struct mystruct, y); struct mystruct x = __get_cpu_var(y); Converts to memcpy(&x, this_cpu_ptr(&y), sizeof(x)); 5. Assignment to a per cpu variable DEFINE_PER_CPU(int, y) __get_cpu_var(y) = x; Converts to __this_cpu_write(y, x); 6. 
Increment/Decrement etc. of a per cpu variable DEFINE_PER_CPU(int, y); __get_cpu_var(y)++ Converts to __this_cpu_inc(y) Cc: Thomas Gleixner <tglx@linutronix.de> Cc: x86@kernel.org Acked-by: H. Peter Anvin <hpa@linux.intel.com> Acked-by: Ingo Molnar <mingo@kernel.org> Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Tejun Heo <tj@kernel.org>
1 parent 532d0d0 commit 89cbc76

30 files changed

+147
-147
lines changed

arch/x86/include/asm/debugreg.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,11 +97,11 @@ extern void hw_breakpoint_restore(void);
9797
DECLARE_PER_CPU(int, debug_stack_usage);
9898
static inline void debug_stack_usage_inc(void)
9999
{
100-
__get_cpu_var(debug_stack_usage)++;
100+
__this_cpu_inc(debug_stack_usage);
101101
}
102102
static inline void debug_stack_usage_dec(void)
103103
{
104-
__get_cpu_var(debug_stack_usage)--;
104+
__this_cpu_dec(debug_stack_usage);
105105
}
106106
int is_debug_stack(unsigned long addr);
107107
void debug_stack_set_zero(void);

arch/x86/include/asm/uv/uv_hub.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,7 @@ struct uv_hub_info_s {
164164
};
165165

166166
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
167-
#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
167+
#define uv_hub_info this_cpu_ptr(&__uv_hub_info)
168168
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
169169

170170
/*

arch/x86/kernel/apb_timer.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,7 @@ static inline int is_apbt_capable(void)
146146
static int __init apbt_clockevent_register(void)
147147
{
148148
struct sfi_timer_table_entry *mtmr;
149-
struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);
149+
struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);
150150

151151
mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
152152
if (mtmr == NULL) {
@@ -200,7 +200,7 @@ void apbt_setup_secondary_clock(void)
200200
if (!cpu)
201201
return;
202202

203-
adev = &__get_cpu_var(cpu_apbt_dev);
203+
adev = this_cpu_ptr(&cpu_apbt_dev);
204204
if (!adev->timer) {
205205
adev->timer = dw_apb_clockevent_init(cpu, adev->name,
206206
APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),

arch/x86/kernel/apic/apic.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -561,7 +561,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
561561
*/
562562
static void setup_APIC_timer(void)
563563
{
564-
struct clock_event_device *levt = &__get_cpu_var(lapic_events);
564+
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
565565

566566
if (this_cpu_has(X86_FEATURE_ARAT)) {
567567
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
@@ -696,7 +696,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
696696

697697
static int __init calibrate_APIC_clock(void)
698698
{
699-
struct clock_event_device *levt = &__get_cpu_var(lapic_events);
699+
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
700700
void (*real_handler)(struct clock_event_device *dev);
701701
unsigned long deltaj;
702702
long delta, deltatsc;

arch/x86/kernel/cpu/common.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1198,9 +1198,9 @@ DEFINE_PER_CPU(int, debug_stack_usage);
11981198

11991199
int is_debug_stack(unsigned long addr)
12001200
{
1201-
return __get_cpu_var(debug_stack_usage) ||
1202-
(addr <= __get_cpu_var(debug_stack_addr) &&
1203-
addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
1201+
return __this_cpu_read(debug_stack_usage) ||
1202+
(addr <= __this_cpu_read(debug_stack_addr) &&
1203+
addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
12041204
}
12051205
NOKPROBE_SYMBOL(is_debug_stack);
12061206

arch/x86/kernel/cpu/mcheck/mce-inject.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex);
8383
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
8484
{
8585
int cpu = smp_processor_id();
86-
struct mce *m = &__get_cpu_var(injectm);
86+
struct mce *m = this_cpu_ptr(&injectm);
8787
if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
8888
return NMI_DONE;
8989
cpumask_clear_cpu(cpu, mce_inject_cpumask);
@@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
9797
static void mce_irq_ipi(void *info)
9898
{
9999
int cpu = smp_processor_id();
100-
struct mce *m = &__get_cpu_var(injectm);
100+
struct mce *m = this_cpu_ptr(&injectm);
101101

102102
if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
103103
m->inject_flags & MCJ_EXCEPTION) {
@@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info)
109109
/* Inject mce on current CPU */
110110
static int raise_local(void)
111111
{
112-
struct mce *m = &__get_cpu_var(injectm);
112+
struct mce *m = this_cpu_ptr(&injectm);
113113
int context = MCJ_CTX(m->inject_flags);
114114
int ret = 0;
115115
int cpu = m->extcpu;

arch/x86/kernel/cpu/mcheck/mce.c

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr)
400400

401401
if (offset < 0)
402402
return 0;
403-
return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
403+
return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
404404
}
405405

406406
if (rdmsrl_safe(msr, &v)) {
@@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
422422
int offset = msr_to_offset(msr);
423423

424424
if (offset >= 0)
425-
*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
425+
*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
426426
return;
427427
}
428428
wrmsrl(msr, v);
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring);
478478
/* Runs with CPU affinity in workqueue */
479479
static int mce_ring_empty(void)
480480
{
481-
struct mce_ring *r = &__get_cpu_var(mce_ring);
481+
struct mce_ring *r = this_cpu_ptr(&mce_ring);
482482

483483
return r->start == r->end;
484484
}
@@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn)
490490

491491
*pfn = 0;
492492
get_cpu();
493-
r = &__get_cpu_var(mce_ring);
493+
r = this_cpu_ptr(&mce_ring);
494494
if (r->start == r->end)
495495
goto out;
496496
*pfn = r->ring[r->start];
@@ -504,7 +504,7 @@ static int mce_ring_get(unsigned long *pfn)
504504
/* Always runs in MCE context with preempt off */
505505
static int mce_ring_add(unsigned long pfn)
506506
{
507-
struct mce_ring *r = &__get_cpu_var(mce_ring);
507+
struct mce_ring *r = this_cpu_ptr(&mce_ring);
508508
unsigned next;
509509

510510
next = (r->end + 1) % MCE_RING_SIZE;
@@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c)
526526
static void mce_schedule_work(void)
527527
{
528528
if (!mce_ring_empty())
529-
schedule_work(&__get_cpu_var(mce_work));
529+
schedule_work(this_cpu_ptr(&mce_work));
530530
}
531531

532532
DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs)
551551
return;
552552
}
553553

554-
irq_work_queue(&__get_cpu_var(mce_irq_work));
554+
irq_work_queue(this_cpu_ptr(&mce_irq_work));
555555
}
556556

557557
/*
@@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
10451045

10461046
mce_gather_info(&m, regs);
10471047

1048-
final = &__get_cpu_var(mces_seen);
1048+
final = this_cpu_ptr(&mces_seen);
10491049
*final = m;
10501050

10511051
memset(valid_banks, 0, sizeof(valid_banks));
@@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) =
12781278

12791279
static int cmc_error_seen(void)
12801280
{
1281-
unsigned long *v = &__get_cpu_var(mce_polled_error);
1281+
unsigned long *v = this_cpu_ptr(&mce_polled_error);
12821282

12831283
return test_and_clear_bit(0, v);
12841284
}
12851285

12861286
static void mce_timer_fn(unsigned long data)
12871287
{
1288-
struct timer_list *t = &__get_cpu_var(mce_timer);
1288+
struct timer_list *t = this_cpu_ptr(&mce_timer);
12891289
unsigned long iv;
12901290
int notify;
12911291

12921292
WARN_ON(smp_processor_id() != data);
12931293

1294-
if (mce_available(__this_cpu_ptr(&cpu_info))) {
1294+
if (mce_available(this_cpu_ptr(&cpu_info))) {
12951295
machine_check_poll(MCP_TIMESTAMP,
1296-
&__get_cpu_var(mce_poll_banks));
1296+
this_cpu_ptr(&mce_poll_banks));
12971297
mce_intel_cmci_poll();
12981298
}
12991299

@@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data)
13231323
*/
13241324
void mce_timer_kick(unsigned long interval)
13251325
{
1326-
struct timer_list *t = &__get_cpu_var(mce_timer);
1326+
struct timer_list *t = this_cpu_ptr(&mce_timer);
13271327
unsigned long when = jiffies + interval;
13281328
unsigned long iv = __this_cpu_read(mce_next_interval);
13291329

@@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
16591659

16601660
static void __mcheck_cpu_init_timer(void)
16611661
{
1662-
struct timer_list *t = &__get_cpu_var(mce_timer);
1662+
struct timer_list *t = this_cpu_ptr(&mce_timer);
16631663
unsigned int cpu = smp_processor_id();
16641664

16651665
setup_timer(t, mce_timer_fn, cpu);
@@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
17021702
__mcheck_cpu_init_generic();
17031703
__mcheck_cpu_init_vendor(c);
17041704
__mcheck_cpu_init_timer();
1705-
INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
1706-
init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
1705+
INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
1706+
init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
17071707
}
17081708

17091709
/*
@@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = {
19551955
static void __mce_disable_bank(void *arg)
19561956
{
19571957
int bank = *((int *)arg);
1958-
__clear_bit(bank, __get_cpu_var(mce_poll_banks));
1958+
__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
19591959
cmci_disable_bank(bank);
19601960
}
19611961

@@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void)
20652065
static void mce_syscore_resume(void)
20662066
{
20672067
__mcheck_cpu_init_generic();
2068-
__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
2068+
__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
20692069
}
20702070

20712071
static struct syscore_ops mce_syscore_ops = {
@@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = {
20802080

20812081
static void mce_cpu_restart(void *data)
20822082
{
2083-
if (!mce_available(__this_cpu_ptr(&cpu_info)))
2083+
if (!mce_available(raw_cpu_ptr(&cpu_info)))
20842084
return;
20852085
__mcheck_cpu_init_generic();
20862086
__mcheck_cpu_init_timer();
@@ -2096,14 +2096,14 @@ static void mce_restart(void)
20962096
/* Toggle features for corrected errors */
20972097
static void mce_disable_cmci(void *data)
20982098
{
2099-
if (!mce_available(__this_cpu_ptr(&cpu_info)))
2099+
if (!mce_available(raw_cpu_ptr(&cpu_info)))
21002100
return;
21012101
cmci_clear();
21022102
}
21032103

21042104
static void mce_enable_ce(void *all)
21052105
{
2106-
if (!mce_available(__this_cpu_ptr(&cpu_info)))
2106+
if (!mce_available(raw_cpu_ptr(&cpu_info)))
21072107
return;
21082108
cmci_reenable();
21092109
cmci_recheck();
@@ -2336,7 +2336,7 @@ static void mce_disable_cpu(void *h)
23362336
unsigned long action = *(unsigned long *)h;
23372337
int i;
23382338

2339-
if (!mce_available(__this_cpu_ptr(&cpu_info)))
2339+
if (!mce_available(raw_cpu_ptr(&cpu_info)))
23402340
return;
23412341

23422342
if (!(action & CPU_TASKS_FROZEN))
@@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h)
23542354
unsigned long action = *(unsigned long *)h;
23552355
int i;
23562356

2357-
if (!mce_available(__this_cpu_ptr(&cpu_info)))
2357+
if (!mce_available(raw_cpu_ptr(&cpu_info)))
23582358
return;
23592359

23602360
if (!(action & CPU_TASKS_FROZEN))

arch/x86/kernel/cpu/mcheck/mce_amd.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void)
310310
* event.
311311
*/
312312
machine_check_poll(MCP_TIMESTAMP,
313-
&__get_cpu_var(mce_poll_banks));
313+
this_cpu_ptr(&mce_poll_banks));
314314

315315
if (high & MASK_OVERFLOW_HI) {
316316
rdmsrl(address, m.misc);

arch/x86/kernel/cpu/mcheck/mce_intel.c

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void)
8686
{
8787
if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
8888
return;
89-
machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
89+
machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
9090
}
9191

9292
void mce_intel_hcpu_update(unsigned long cpu)
@@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void)
145145
u64 val;
146146

147147
raw_spin_lock_irqsave(&cmci_discover_lock, flags);
148-
owned = __get_cpu_var(mce_banks_owned);
148+
owned = this_cpu_ptr(mce_banks_owned);
149149
for_each_set_bit(bank, owned, MAX_NR_BANKS) {
150150
rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
151151
val &= ~MCI_CTL2_CMCI_EN;
@@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void)
195195
{
196196
if (cmci_storm_detect())
197197
return;
198-
machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
198+
machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
199199
mce_notify_irq();
200200
}
201201

@@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void)
206206
*/
207207
static void cmci_discover(int banks)
208208
{
209-
unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
209+
unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
210210
unsigned long flags;
211211
int i;
212212
int bios_wrong_thresh = 0;
@@ -228,7 +228,7 @@ static void cmci_discover(int banks)
228228
/* Already owned by someone else? */
229229
if (val & MCI_CTL2_CMCI_EN) {
230230
clear_bit(i, owned);
231-
__clear_bit(i, __get_cpu_var(mce_poll_banks));
231+
__clear_bit(i, this_cpu_ptr(mce_poll_banks));
232232
continue;
233233
}
234234

@@ -252,7 +252,7 @@ static void cmci_discover(int banks)
252252
/* Did the enable bit stick? -- the bank supports CMCI */
253253
if (val & MCI_CTL2_CMCI_EN) {
254254
set_bit(i, owned);
255-
__clear_bit(i, __get_cpu_var(mce_poll_banks));
255+
__clear_bit(i, this_cpu_ptr(mce_poll_banks));
256256
/*
257257
* We are able to set thresholds for some banks that
258258
* had a threshold of 0. This means the BIOS has not
@@ -263,7 +263,7 @@ static void cmci_discover(int banks)
263263
(val & MCI_CTL2_CMCI_THRESHOLD_MASK))
264264
bios_wrong_thresh = 1;
265265
} else {
266-
WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
266+
WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
267267
}
268268
}
269269
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
@@ -284,10 +284,10 @@ void cmci_recheck(void)
284284
unsigned long flags;
285285
int banks;
286286

287-
if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
287+
if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
288288
return;
289289
local_irq_save(flags);
290-
machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
290+
machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
291291
local_irq_restore(flags);
292292
}
293293

@@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank)
296296
{
297297
u64 val;
298298

299-
if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
299+
if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
300300
return;
301301
rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
302302
val &= ~MCI_CTL2_CMCI_EN;
303303
wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
304-
__clear_bit(bank, __get_cpu_var(mce_banks_owned));
304+
__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
305305
}
306306

307307
/*

0 commit comments

Comments
 (0)