Skip to content

Commit 1f351d7

Browse files
hnaz authored and torvalds committed
sched: sched.h: make rq locking and clock functions available in stats.h
kernel/sched/sched.h includes "stats.h" half-way through the file. The next patch introduces users of sched.h's rq locking functions and update_rq_clock() in kernel/sched/stats.h. Move those definitions up in the file so they are available in stats.h. Link: http://lkml.kernel.org/r/20180828172258.3185-7-hannes@cmpxchg.org Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Suren Baghdasaryan <surenb@google.com> Tested-by: Daniel Drake <drake@endlessm.com> Cc: Christopher Lameter <cl@linux.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Johannes Weiner <jweiner@fb.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Enderborg <peter.enderborg@sony.com> Cc: Randy Dunlap <rdunlap@infradead.org> Cc: Shakeel Butt <shakeelb@google.com> Cc: Tejun Heo <tj@kernel.org> Cc: Vinayak Menon <vinmenon@codeaurora.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 5c54f5b commit 1f351d7

File tree

1 file changed

+82
-82
lines changed

kernel/sched/sched.h

Lines changed: 82 additions & 82 deletions
Original file line number | Diff line number | Diff line change
@@ -957,6 +957,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
957957
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
958958
#define raw_rq() raw_cpu_ptr(&runqueues)
959959

960+
extern void update_rq_clock(struct rq *rq);
961+
960962
static inline u64 __rq_clock_broken(struct rq *rq)
961963
{
962964
return READ_ONCE(rq->clock);
@@ -1075,6 +1077,86 @@ static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
10751077
#endif
10761078
}
10771079

1080+
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1081+
__acquires(rq->lock);
1082+
1083+
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1084+
__acquires(p->pi_lock)
1085+
__acquires(rq->lock);
1086+
1087+
static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1088+
__releases(rq->lock)
1089+
{
1090+
rq_unpin_lock(rq, rf);
1091+
raw_spin_unlock(&rq->lock);
1092+
}
1093+
1094+
static inline void
1095+
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1096+
__releases(rq->lock)
1097+
__releases(p->pi_lock)
1098+
{
1099+
rq_unpin_lock(rq, rf);
1100+
raw_spin_unlock(&rq->lock);
1101+
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1102+
}
1103+
1104+
static inline void
1105+
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1106+
__acquires(rq->lock)
1107+
{
1108+
raw_spin_lock_irqsave(&rq->lock, rf->flags);
1109+
rq_pin_lock(rq, rf);
1110+
}
1111+
1112+
static inline void
1113+
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1114+
__acquires(rq->lock)
1115+
{
1116+
raw_spin_lock_irq(&rq->lock);
1117+
rq_pin_lock(rq, rf);
1118+
}
1119+
1120+
static inline void
1121+
rq_lock(struct rq *rq, struct rq_flags *rf)
1122+
__acquires(rq->lock)
1123+
{
1124+
raw_spin_lock(&rq->lock);
1125+
rq_pin_lock(rq, rf);
1126+
}
1127+
1128+
static inline void
1129+
rq_relock(struct rq *rq, struct rq_flags *rf)
1130+
__acquires(rq->lock)
1131+
{
1132+
raw_spin_lock(&rq->lock);
1133+
rq_repin_lock(rq, rf);
1134+
}
1135+
1136+
static inline void
1137+
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1138+
__releases(rq->lock)
1139+
{
1140+
rq_unpin_lock(rq, rf);
1141+
raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1142+
}
1143+
1144+
static inline void
1145+
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1146+
__releases(rq->lock)
1147+
{
1148+
rq_unpin_lock(rq, rf);
1149+
raw_spin_unlock_irq(&rq->lock);
1150+
}
1151+
1152+
static inline void
1153+
rq_unlock(struct rq *rq, struct rq_flags *rf)
1154+
__releases(rq->lock)
1155+
{
1156+
rq_unpin_lock(rq, rf);
1157+
raw_spin_unlock(&rq->lock);
1158+
}
1159+
10781160
#ifdef CONFIG_NUMA
10791161
enum numa_topology_type {
10801162
NUMA_DIRECT,
@@ -1717,8 +1799,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
17171799
sched_update_tick_dependency(rq);
17181800
}
17191801

1720-
extern void update_rq_clock(struct rq *rq);
1721-
17221802
extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
17231803
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
17241804

@@ -1783,86 +1863,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
17831863
#endif
17841864
#endif
17851865

1786-
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1787-
__acquires(rq->lock);
1788-
1789-
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1790-
__acquires(p->pi_lock)
1791-
__acquires(rq->lock);
1792-
1793-
static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1794-
__releases(rq->lock)
1795-
{
1796-
rq_unpin_lock(rq, rf);
1797-
raw_spin_unlock(&rq->lock);
1798-
}
1799-
1800-
static inline void
1801-
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1802-
__releases(rq->lock)
1803-
__releases(p->pi_lock)
1804-
{
1805-
rq_unpin_lock(rq, rf);
1806-
raw_spin_unlock(&rq->lock);
1807-
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1808-
}
1809-
1810-
static inline void
1811-
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1812-
__acquires(rq->lock)
1813-
{
1814-
raw_spin_lock_irqsave(&rq->lock, rf->flags);
1815-
rq_pin_lock(rq, rf);
1816-
}
1817-
1818-
static inline void
1819-
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1820-
__acquires(rq->lock)
1821-
{
1822-
raw_spin_lock_irq(&rq->lock);
1823-
rq_pin_lock(rq, rf);
1824-
}
1825-
1826-
static inline void
1827-
rq_lock(struct rq *rq, struct rq_flags *rf)
1828-
__acquires(rq->lock)
1829-
{
1830-
raw_spin_lock(&rq->lock);
1831-
rq_pin_lock(rq, rf);
1832-
}
1833-
1834-
static inline void
1835-
rq_relock(struct rq *rq, struct rq_flags *rf)
1836-
__acquires(rq->lock)
1837-
{
1838-
raw_spin_lock(&rq->lock);
1839-
rq_repin_lock(rq, rf);
1840-
}
1841-
1842-
static inline void
1843-
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1844-
__releases(rq->lock)
1845-
{
1846-
rq_unpin_lock(rq, rf);
1847-
raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1848-
}
1849-
1850-
static inline void
1851-
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1852-
__releases(rq->lock)
1853-
{
1854-
rq_unpin_lock(rq, rf);
1855-
raw_spin_unlock_irq(&rq->lock);
1856-
}
1857-
1858-
static inline void
1859-
rq_unlock(struct rq *rq, struct rq_flags *rf)
1860-
__releases(rq->lock)
1861-
{
1862-
rq_unpin_lock(rq, rf);
1863-
raw_spin_unlock(&rq->lock);
1864-
}
1865-
18661866
#ifdef CONFIG_SMP
18671867
#ifdef CONFIG_PREEMPT
18681868

0 commit comments

Comments
 (0)