
Commit a97673a

Ingo Molnar authored and committed
x86: Fix various typos in comments
Go over arch/x86/ and fix common typos in comments, and a typo in an
actual function argument name. No change in functionality intended.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent df60673 commit a97673a

26 files changed, +28 -28 lines changed

arch/x86/crypto/cast5_avx_glue.c

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 /*
- * Glue Code for the AVX assembler implemention of the Cast5 Cipher
+ * Glue Code for the AVX assembler implementation of the Cast5 Cipher
  *
  * Copyright (C) 2012 Johannes Goetzfried
  *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>

arch/x86/crypto/cast6_avx_glue.c

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 /*
- * Glue Code for the AVX assembler implemention of the Cast6 Cipher
+ * Glue Code for the AVX assembler implementation of the Cast6 Cipher
  *
  * Copyright (C) 2012 Johannes Goetzfried
  *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>

arch/x86/entry/common.c

Lines changed: 1 addition & 1 deletion
@@ -140,7 +140,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 	/*
 	 * In order to return to user mode, we need to have IRQs off with
 	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
-	 * can be set at any time on preemptable kernels if we have IRQs on,
+	 * can be set at any time on preemptible kernels if we have IRQs on,
 	 * so we need to loop. Disabling preemption wouldn't help: doing the
 	 * work to clear some of the flags can sleep.
 	 */
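
Note: the loop this comment belongs to has roughly the following shape -- a simplified sketch for context, not the exact kernel code:

	while (true) {
		/* IRQs stay on while doing work that may sleep. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();
		/* ... signal delivery, notify-resume work, etc. ... */

		/* Re-check with IRQs off; a flag may have been set meanwhile. */
		local_irq_disable();
		cached_flags = READ_ONCE(current_thread_info()->flags);
		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}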

arch/x86/entry/vdso/vma.c

Lines changed: 1 addition & 1 deletion
@@ -261,7 +261,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	 * abusing from userspace install_speciall_mapping, which may
 	 * not do accounting and rlimit right.
 	 * We could search vma near context.vdso, but it's a slowpath,
-	 * so let's explicitely check all VMAs to be completely sure.
+	 * so let's explicitly check all VMAs to be completely sure.
 	 */
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma_is_special_mapping(vma, &vdso_mapping) ||

arch/x86/events/intel/bts.c

Lines changed: 1 addition & 1 deletion
@@ -589,7 +589,7 @@ static __init int bts_init(void)
 	 * the AUX buffer.
 	 *
 	 * However, since this driver supports per-CPU and per-task inherit
-	 * we cannot use the user mapping since it will not be availble
+	 * we cannot use the user mapping since it will not be available
 	 * if we're not running the owning process.
 	 *
 	 * With PTI we can't use the kernal map either, because its not

arch/x86/events/intel/core.c

Lines changed: 1 addition & 1 deletion
@@ -1930,7 +1930,7 @@ static void intel_pmu_enable_all(int added)
  * in sequence on the same PMC or on different PMCs.
  *
  * In practise it appears some of these events do in fact count, and
- * we need to programm all 4 events.
+ * we need to program all 4 events.
  */
 static void intel_pmu_nhm_workaround(void)
 {

arch/x86/events/intel/ds.c

Lines changed: 1 addition & 1 deletion
@@ -1199,7 +1199,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	/*
 	 * We must however always use iregs for the unwinder to stay sane; the
 	 * record BP,SP,IP can point into thin air when the record is from a
-	 * previous PMI context or an (I)RET happend between the record and
+	 * previous PMI context or an (I)RET happened between the record and
 	 * PMI.
 	 */
 	if (sample_type & PERF_SAMPLE_CALLCHAIN)

arch/x86/events/intel/p4.c

Lines changed: 1 addition & 1 deletion
@@ -1259,7 +1259,7 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
 	}
 	/*
 	 * Perf does test runs to see if a whole group can be assigned
-	 * together succesfully. There can be multiple rounds of this.
+	 * together successfully. There can be multiple rounds of this.
 	 * Unfortunately, p4_pmu_swap_config_ts touches the hwc->config
 	 * bits, such that the next round of group assignments will
 	 * cause the above p4_should_swap_ts to pass instead of fail.

arch/x86/include/asm/alternative.h

Lines changed: 1 addition & 1 deletion
@@ -167,7 +167,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /*
  * Alternative inline assembly with input.
  *
- * Pecularities:
+ * Peculiarities:
  * No memory clobber here.
  * Argument numbers start with 1.
  * Best is to use constraints that are fixed size (like (%1) ... "r")
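
For context, a typical use of this API is the kernel's prefetch() helper, which patches in PREFETCHNTA on CPUs with SSE -- shown here as a sketch, since the exact macro arguments vary across kernel versions:

	static inline void prefetch(const void *x)
	{
		/* Runtime-patched: fallback instruction on old CPUs,
		 * PREFETCHNTA when X86_FEATURE_XMM is present. */
		alternative_input(BASE_PREFETCH, "prefetchnta %P1",
				  X86_FEATURE_XMM,
				  "m" (*(const char *)x));
	}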

arch/x86/include/asm/cmpxchg.h

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
 /*
- * Non-existant functions to indicate usage errors at link time
+ * Non-existent functions to indicate usage errors at link time
  * (or compile-time if the compiler implements __compiletime_error().
  */
 extern void __xchg_wrong_size(void)
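
The pattern this comment describes declares functions that are never defined anywhere, so any call that survives compile-time constant folding fails at link time. A standalone sketch of the idea, using illustrative names rather than the kernel's (it relies on the compiler discarding the constant-false branch, so build with optimization enabled):

	/* Deliberately never defined: any surviving reference breaks the link. */
	extern void example_bad_size_error(void);

	#define EXAMPLE_CHECK_SIZE(x)				\
		do {						\
			if (sizeof(x) != 4 && sizeof(x) != 8)	\
				example_bad_size_error();	\
		} while (0)

	int main(void)
	{
		int ok = 0;
		EXAMPLE_CHECK_SIZE(ok);	/* sizeof == 4: branch folded away, links fine */
		/* char bad; EXAMPLE_CHECK_SIZE(bad);  -- would fail at link time */
		return 0;
	}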

arch/x86/include/asm/efi.h

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
  * This is the main reason why we're doing stable VA mappings for RT
  * services.
  *
- * This flag is used in conjuction with a chicken bit called
+ * This flag is used in conjunction with a chicken bit called
  * "efi=old_map" which can be used as a fallback to the old runtime
  * services mapping method in case there's some b0rkage with a
  * particular EFI implementation (haha, it is hard to hold up the

arch/x86/kernel/acpi/boot.c

Lines changed: 1 addition & 1 deletion
@@ -848,7 +848,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic);
 /**
  * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base
  *			    has been registered
- * @handle:	ACPI handle of the IOAPIC deivce
+ * @handle:	ACPI handle of the IOAPIC device
  * @gsi_base:	GSI base associated with the IOAPIC
  *
  * Assume caller holds some type of lock to serialize acpi_ioapic_registered()

arch/x86/kernel/cpu/mcheck/mce.c

Lines changed: 1 addition & 1 deletion
@@ -686,7 +686,7 @@ DEFINE_PER_CPU(unsigned, mce_poll_count);
  * errors here. However this would be quite problematic --
  * we would need to reimplement the Monarch handling and
  * it would mess up the exclusion between exception handler
- * and poll hander -- * so we skip this for now.
+ * and poll handler -- * so we skip this for now.
  * These cases should not happen anyways, or only when the CPU
  * is already totally * confused. In this case it's likely it will
  * not fully execute the machine check handler either.

arch/x86/kernel/crash_dump_64.c

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
 
 /**
  * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
- * memory with the encryption mask set to accomodate kdump on SME-enabled
+ * memory with the encryption mask set to accommodate kdump on SME-enabled
  * machines.
  */
 ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,

arch/x86/kernel/process_64.c

Lines changed: 1 addition & 1 deletion
@@ -684,7 +684,7 @@ void set_personality_64bit(void)
 	/* TBD: overwrites user setup. Should have two bits.
 	   But 64bit processes have always behaved this way,
 	   so it's not too bad. The main problem is just that
-	   32bit childs are affected again. */
+	   32bit children are affected again. */
 	current->personality &= ~READ_IMPLIES_EXEC;
 }

arch/x86/kvm/vmx.c

Lines changed: 2 additions & 2 deletions
@@ -485,7 +485,7 @@ struct __packed vmcs12 {
 	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
-	 * unsigned long fields with no explict size. We use u64 (aliased
+	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead. Luckily, x86 is little-endian.
	 */
 	natural_width cr0_guest_host_mask;
@@ -4936,7 +4936,7 @@ static __init int alloc_kvm_area(void)
	 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
	 * revision_id reported by MSR_IA32_VMX_BASIC.
	 *
-	 * However, even though not explictly documented by
+	 * However, even though not explicitly documented by
	 * TLFS, VMXArea passed as VMXON argument should
	 * still be marked with revision_id reported by
	 * physical CPU.

arch/x86/kvm/x86.c

Lines changed: 1 addition & 1 deletion
@@ -9280,7 +9280,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
	 * with dirty logging disabled in order to eliminate unnecessary GPA
	 * logging in PML buffer (and potential PML buffer full VMEXT). This
	 * guarantees leaving PML enabled during guest's lifetime won't have
-	 * any additonal overhead from PML when guest is running with dirty
+	 * any additional overhead from PML when guest is running with dirty
	 * logging disabled for memory slots.
	 *
	 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot

arch/x86/mm/pageattr.c

Lines changed: 1 addition & 1 deletion
@@ -1704,7 +1704,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
 		/*
 		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
-		 * No need to cehck in that case
+		 * No need to check in that case
 		 */
 		if (*addr & ~PAGE_MASK) {
 			*addr &= PAGE_MASK;

arch/x86/platform/ce4100/ce4100.c

Lines changed: 2 additions & 2 deletions
@@ -84,7 +84,7 @@ static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
 }
 
 static void ce4100_serial_fixup(int port, struct uart_port *up,
-				u32 *capabilites)
+				u32 *capabilities)
 {
 #ifdef CONFIG_EARLY_PRINTK
 	/*
@@ -111,7 +111,7 @@ static void ce4100_serial_fixup(int port, struct uart_port *up,
 	up->serial_in = ce4100_mem_serial_in;
 	up->serial_out = ce4100_mem_serial_out;
 
-	*capabilites |= (1 << 12);
+	*capabilities |= (1 << 12);
 }
 
 static __init void sdv_serial_fixup(void)

arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 /*
- * platform_bcm43xx.c: bcm43xx platform data initilization file
+ * platform_bcm43xx.c: bcm43xx platform data initialization file
  *
  * (C) Copyright 2016 Intel Corporation
  * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>

arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 /*
- * spidev platform data initilization file
+ * spidev platform data initialization file
  *
  * (C) Copyright 2014, 2016 Intel Corporation
  * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>

arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 /*
- * PCAL9555a platform data initilization file
+ * PCAL9555a platform data initialization file
  *
  * Copyright (C) 2016, Intel Corporation
  *

arch/x86/platform/intel/iosf_mbi.c

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
  *
  *
  * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a
- * mailbox interface (MBI) to communicate with mutiple devices. This
+ * mailbox interface (MBI) to communicate with multiple devices. This
  * driver implements access to this interface for those platforms that can
  * enumerate the device using PCI.
  */

arch/x86/platform/olpc/olpc-xo1-sci.c

Lines changed: 1 addition & 1 deletion
@@ -109,7 +109,7 @@ static void detect_lid_state(void)
	 * the edge detector hookup on the gpio inputs on the geode is
	 * odd, to say the least. See http://dev.laptop.org/ticket/5703
	 * for details, but in a nutshell: we don't use the edge
-	 * detectors. instead, we make use of an anomoly: with the both
+	 * detectors. instead, we make use of an anomaly: with the both
	 * edge detectors turned off, we still get an edge event on a
	 * positive edge transition. to take advantage of this, we use the
	 * front-end inverter to ensure that that's the edge we're always

arch/x86/platform/uv/uv_nmi.c

Lines changed: 1 addition & 1 deletion
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
 	}
 }
 
-/* Ping non-responding CPU's attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attempting to force them into the NMI handler */
 static void uv_nmi_nr_cpus_ping(void)
 {
 	int cpu;

arch/x86/xen/setup.c

Lines changed: 1 addition & 1 deletion
@@ -493,7 +493,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
  * The remap information (which mfn remap to which pfn) is contained in the
  * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
  * This scheme allows to remap the different chunks in arbitrary order while
- * the resulting mapping will be independant from the order.
+ * the resulting mapping will be independent from the order.
  */
 void __init xen_remap_memory(void)
 {
