Skip to content

Commit 97fb7a0

Browse files
author
Ingo Molnar
committed
sched: Clean up and harmonize the coding style of the scheduler code base
A good number of small style inconsistencies have accumulated in the scheduler core, so do a pass over them to harmonize all these details: - fix spelling in comments, - use curly braces for multi-line statements, - remove unnecessary parentheses from integer literals, - capitalize consistently, - remove stray newlines, - add comments where necessary, - remove invalid/unnecessary comments, - align structure definitions and other data types vertically, - add missing newlines for increased readability, - fix vertical tabulation where it's misaligned, - harmonize preprocessor conditional block labeling and vertical alignment, - remove line-breaks where they uglify the code, - add newline after local variable definitions, No change in functionality: md5: 1191fa0a890cfa8132156d2959d7e9e2 built-in.o.before.asm 1191fa0a890cfa8132156d2959d7e9e2 built-in.o.after.asm Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent c2e5138 commit 97fb7a0

28 files changed

+706
-710
lines changed

kernel/sched/autogroup.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -168,18 +168,19 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
168168
autogroup_kref_put(prev);
169169
}
170170

171-
/* Allocates GFP_KERNEL, cannot be called under any spinlock */
171+
/* Allocates GFP_KERNEL, cannot be called under any spinlock: */
172172
void sched_autogroup_create_attach(struct task_struct *p)
173173
{
174174
struct autogroup *ag = autogroup_create();
175175

176176
autogroup_move_group(p, ag);
177-
/* drop extra reference added by autogroup_create() */
177+
178+
/* Drop extra reference added by autogroup_create(): */
178179
autogroup_kref_put(ag);
179180
}
180181
EXPORT_SYMBOL(sched_autogroup_create_attach);
181182

182-
/* Cannot be called under siglock. Currently has no users */
183+
/* Cannot be called under siglock. Currently has no users: */
183184
void sched_autogroup_detach(struct task_struct *p)
184185
{
185186
autogroup_move_group(p, &autogroup_default);
@@ -202,7 +203,6 @@ static int __init setup_autogroup(char *str)
202203

203204
return 1;
204205
}
205-
206206
__setup("noautogroup", setup_autogroup);
207207

208208
#ifdef CONFIG_PROC_FS
@@ -224,7 +224,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
224224
if (nice < 0 && !can_nice(current, nice))
225225
return -EPERM;
226226

227-
/* this is a heavy operation taking global locks.. */
227+
/* This is a heavy operation, taking global locks.. */
228228
if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
229229
return -EAGAIN;
230230

@@ -267,4 +267,4 @@ int autogroup_path(struct task_group *tg, char *buf, int buflen)
267267

268268
return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
269269
}
270-
#endif /* CONFIG_SCHED_DEBUG */
270+
#endif

kernel/sched/autogroup.h

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,9 @@
77

88
struct autogroup {
99
/*
10-
* reference doesn't mean how many thread attach to this
11-
* autogroup now. It just stands for the number of task
12-
* could use this autogroup.
10+
* Reference doesn't mean how many threads attach to this
11+
* autogroup now. It just stands for the number of tasks
12+
* which could use this autogroup.
1313
*/
1414
struct kref kref;
1515
struct task_group *tg;
@@ -56,11 +56,9 @@ autogroup_task_group(struct task_struct *p, struct task_group *tg)
5656
return tg;
5757
}
5858

59-
#ifdef CONFIG_SCHED_DEBUG
6059
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
6160
{
6261
return 0;
6362
}
64-
#endif
6563

6664
#endif /* CONFIG_SCHED_AUTOGROUP */

kernel/sched/clock.c

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* sched_clock for unstable cpu clocks
2+
* sched_clock() for unstable CPU clocks
33
*
44
* Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
55
*
@@ -11,7 +11,7 @@
1111
* Guillaume Chazarain <guichaz@gmail.com>
1212
*
1313
*
14-
* What:
14+
* What this file implements:
1515
*
1616
* cpu_clock(i) provides a fast (execution time) high resolution
1717
* clock with bounded drift between CPUs. The value of cpu_clock(i)
@@ -26,11 +26,11 @@
2626
* at 0 on boot (but people really shouldn't rely on that).
2727
*
2828
* cpu_clock(i) -- can be used from any context, including NMI.
29-
* local_clock() -- is cpu_clock() on the current cpu.
29+
* local_clock() -- is cpu_clock() on the current CPU.
3030
*
3131
* sched_clock_cpu(i)
3232
*
33-
* How:
33+
* How it is implemented:
3434
*
3535
* The implementation either uses sched_clock() when
3636
* !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
@@ -302,21 +302,21 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
302302
* cmpxchg64 below only protects one readout.
303303
*
304304
* We must reread via sched_clock_local() in the retry case on
305-
* 32bit as an NMI could use sched_clock_local() via the
305+
* 32-bit kernels as an NMI could use sched_clock_local() via the
306306
* tracer and hit between the readout of
307-
* the low32bit and the high 32bit portion.
307+
* the low 32-bit and the high 32-bit portion.
308308
*/
309309
this_clock = sched_clock_local(my_scd);
310310
/*
311-
* We must enforce atomic readout on 32bit, otherwise the
312-
* update on the remote cpu can hit inbetween the readout of
313-
* the low32bit and the high 32bit portion.
311+
* We must enforce atomic readout on 32-bit, otherwise the
312+
* update on the remote CPU can hit inbetween the readout of
313+
* the low 32-bit and the high 32-bit portion.
314314
*/
315315
remote_clock = cmpxchg64(&scd->clock, 0, 0);
316316
#else
317317
/*
318-
* On 64bit the read of [my]scd->clock is atomic versus the
319-
* update, so we can avoid the above 32bit dance.
318+
* On 64-bit kernels the read of [my]scd->clock is atomic versus the
319+
* update, so we can avoid the above 32-bit dance.
320320
*/
321321
sched_clock_local(my_scd);
322322
again:

kernel/sched/core.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
135135
* [L] ->on_rq
136136
* RELEASE (rq->lock)
137137
*
138-
* If we observe the old cpu in task_rq_lock, the acquire of
138+
* If we observe the old CPU in task_rq_lock, the acquire of
139139
* the old rq->lock will fully serialize against the stores.
140140
*
141141
* If we observe the new CPU in task_rq_lock, the acquire will
@@ -1457,7 +1457,7 @@ EXPORT_SYMBOL_GPL(kick_process);
14571457
*
14581458
* - cpu_active must be a subset of cpu_online
14591459
*
1460-
* - on cpu-up we allow per-cpu kthreads on the online && !active cpu,
1460+
* - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
14611461
* see __set_cpus_allowed_ptr(). At this point the newly online
14621462
* CPU isn't yet part of the sched domains, and balancing will not
14631463
* see it.
@@ -3037,7 +3037,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
30373037

30383038
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
30393039
/*
3040-
* 64-bit doesn't need locks to atomically read a 64bit value.
3040+
* 64-bit doesn't need locks to atomically read a 64-bit value.
30413041
* So we have a optimization chance when the task's delta_exec is 0.
30423042
* Reading ->on_cpu is racy, but this is ok.
30433043
*

kernel/sched/cpuacct.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
* (balbir@in.ibm.com).
1919
*/
2020

21-
/* Time spent by the tasks of the cpu accounting group executing in ... */
21+
/* Time spent by the tasks of the CPU accounting group executing in ... */
2222
enum cpuacct_stat_index {
2323
CPUACCT_STAT_USER, /* ... user mode */
2424
CPUACCT_STAT_SYSTEM, /* ... kernel mode */
@@ -35,20 +35,20 @@ struct cpuacct_usage {
3535
u64 usages[CPUACCT_STAT_NSTATS];
3636
};
3737

38-
/* track cpu usage of a group of tasks and its child groups */
38+
/* track CPU usage of a group of tasks and its child groups */
3939
struct cpuacct {
40-
struct cgroup_subsys_state css;
41-
/* cpuusage holds pointer to a u64-type object on every cpu */
42-
struct cpuacct_usage __percpu *cpuusage;
43-
struct kernel_cpustat __percpu *cpustat;
40+
struct cgroup_subsys_state css;
41+
/* cpuusage holds pointer to a u64-type object on every CPU */
42+
struct cpuacct_usage __percpu *cpuusage;
43+
struct kernel_cpustat __percpu *cpustat;
4444
};
4545

4646
static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
4747
{
4848
return css ? container_of(css, struct cpuacct, css) : NULL;
4949
}
5050

51-
/* return cpu accounting group to which this task belongs */
51+
/* Return CPU accounting group to which this task belongs */
5252
static inline struct cpuacct *task_ca(struct task_struct *tsk)
5353
{
5454
return css_ca(task_css(tsk, cpuacct_cgrp_id));
@@ -65,7 +65,7 @@ static struct cpuacct root_cpuacct = {
6565
.cpuusage = &root_cpuacct_cpuusage,
6666
};
6767

68-
/* create a new cpu accounting group */
68+
/* Create a new CPU accounting group */
6969
static struct cgroup_subsys_state *
7070
cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
7171
{
@@ -96,7 +96,7 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
9696
return ERR_PTR(-ENOMEM);
9797
}
9898

99-
/* destroy an existing cpu accounting group */
99+
/* Destroy an existing CPU accounting group */
100100
static void cpuacct_css_free(struct cgroup_subsys_state *css)
101101
{
102102
struct cpuacct *ca = css_ca(css);
@@ -162,7 +162,7 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
162162
#endif
163163
}
164164

165-
/* return total cpu usage (in nanoseconds) of a group */
165+
/* Return total CPU usage (in nanoseconds) of a group */
166166
static u64 __cpuusage_read(struct cgroup_subsys_state *css,
167167
enum cpuacct_stat_index index)
168168
{

kernel/sched/cpudeadline.c

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010
* as published by the Free Software Foundation; version 2
1111
* of the License.
1212
*/
13-
1413
#include <linux/gfp.h>
1514
#include <linux/kernel.h>
1615
#include <linux/slab.h>
@@ -147,9 +146,9 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
147146
}
148147

149148
/*
150-
* cpudl_clear - remove a cpu from the cpudl max-heap
149+
* cpudl_clear - remove a CPU from the cpudl max-heap
151150
* @cp: the cpudl max-heap context
152-
* @cpu: the target cpu
151+
* @cpu: the target CPU
153152
*
154153
* Notes: assumes cpu_rq(cpu)->lock is locked
155154
*
@@ -188,8 +187,8 @@ void cpudl_clear(struct cpudl *cp, int cpu)
188187
/*
189188
* cpudl_set - update the cpudl max-heap
190189
* @cp: the cpudl max-heap context
191-
* @cpu: the target cpu
192-
* @dl: the new earliest deadline for this cpu
190+
* @cpu: the target CPU
191+
* @dl: the new earliest deadline for this CPU
193192
*
194193
* Notes: assumes cpu_rq(cpu)->lock is locked
195194
*
@@ -224,7 +223,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
224223
/*
225224
* cpudl_set_freecpu - Set the cpudl.free_cpus
226225
* @cp: the cpudl max-heap context
227-
* @cpu: rd attached cpu
226+
* @cpu: rd attached CPU
228227
*/
229228
void cpudl_set_freecpu(struct cpudl *cp, int cpu)
230229
{
@@ -234,7 +233,7 @@ void cpudl_set_freecpu(struct cpudl *cp, int cpu)
234233
/*
235234
* cpudl_clear_freecpu - Clear the cpudl.free_cpus
236235
* @cp: the cpudl max-heap context
237-
* @cpu: rd attached cpu
236+
* @cpu: rd attached CPU
238237
*/
239238
void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
240239
{

kernel/sched/cpudeadline.h

Lines changed: 10 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,35 +1,28 @@
11
/* SPDX-License-Identifier: GPL-2.0 */
2-
#ifndef _LINUX_CPUDL_H
3-
#define _LINUX_CPUDL_H
4-
52
#include <linux/sched.h>
63
#include <linux/sched/deadline.h>
74

8-
#define IDX_INVALID -1
5+
#define IDX_INVALID -1
96

107
struct cpudl_item {
11-
u64 dl;
12-
int cpu;
13-
int idx;
8+
u64 dl;
9+
int cpu;
10+
int idx;
1411
};
1512

1613
struct cpudl {
17-
raw_spinlock_t lock;
18-
int size;
19-
cpumask_var_t free_cpus;
20-
struct cpudl_item *elements;
14+
raw_spinlock_t lock;
15+
int size;
16+
cpumask_var_t free_cpus;
17+
struct cpudl_item *elements;
2118
};
2219

23-
2420
#ifdef CONFIG_SMP
25-
int cpudl_find(struct cpudl *cp, struct task_struct *p,
26-
struct cpumask *later_mask);
21+
int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask);
2722
void cpudl_set(struct cpudl *cp, int cpu, u64 dl);
2823
void cpudl_clear(struct cpudl *cp, int cpu);
29-
int cpudl_init(struct cpudl *cp);
24+
int cpudl_init(struct cpudl *cp);
3025
void cpudl_set_freecpu(struct cpudl *cp, int cpu);
3126
void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
3227
void cpudl_cleanup(struct cpudl *cp);
3328
#endif /* CONFIG_SMP */
34-
35-
#endif /* _LINUX_CPUDL_H */

0 commit comments

Comments (0)