
Commit 9e31489

Merge tag 'openrisc-for-linus' of git://github.com/openrisc/linux
Pull OpenRISC updates from Stafford Horne:
 "Highlights include:

   - optimized memset and memcpy routines, ~20% boot time saving

   - support for cpu idling

   - adding support for l.swa and l.lwa atomic operations (in spec from 2014)

   - use atomics to implement: bitops, cmpxchg, futex

   - the atomics are in preparation for SMP support"

* tag 'openrisc-for-linus' of git://github.com/openrisc/linux: (25 commits)
  openrisc: head: Init r0 to 0 on start
  openrisc: Export ioremap symbols used by modules
  arch/openrisc/lib/memcpy.c: use correct OR1200 option
  openrisc: head: Remove unused strings
  openrisc: head: Move init strings to rodata section
  openrisc: entry: Fix delay slot detection
  openrisc: entry: Whitespace and comment cleanups
  scripts/checkstack.pl: Add openrisc support
  MAINTAINERS: Add the openrisc official repository
  openrisc: Add .gitignore
  openrisc: Add optimized memcpy routine
  openrisc: Add optimized memset
  openrisc: Initial support for the idle state
  openrisc: Fix the bitmask for the unit present register
  openrisc: remove unnecessary stddef.h include
  openrisc: add futex_atomic_* implementations
  openrisc: add optimized atomic operations
  openrisc: add cmpxchg and xchg implementations
  openrisc: add atomic bitops
  openrisc: add l.lwa/l.swa emulation
  ...
2 parents f8e6859 + a4d4426
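Every one of the new atomic primitives in this merge is built on the same l.lwa/l.swa pattern: l.lwa loads a word and places a reservation on its address, l.swa stores only if that reservation is still held (setting the flag on success), and l.bnf loops back to retry after a lost race. Below is a compile-anywhere sketch of that retry loop, using GCC's __atomic builtins in place of OpenRISC inline assembly; atomic_add_sketch() is a made-up name for illustration.

/* Portable sketch of the retry loop shared by the new primitives.
 * On OpenRISC, the compare-exchange step is what the l.lwa/l.swa
 * instruction pair provides directly in hardware.
 */
#include <stdbool.h>

static void atomic_add_sketch(int *counter, int i)
{
	int old = __atomic_load_n(counter, __ATOMIC_RELAXED);
	int new;

	do {
		new = old + i;	/* the operation (add/sub/and/or/xor) */
		/* On failure, "old" is reloaded with the current value. */
	} while (!__atomic_compare_exchange_n(counter, &old, new,
					      false, __ATOMIC_ACQ_REL,
					      __ATOMIC_RELAXED));
}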

26 files changed, 1064 insertions(+), 187 deletions(-)


MAINTAINERS

Lines changed: 1 addition & 0 deletions
@@ -9315,6 +9315,7 @@ OPENRISC ARCHITECTURE
 M:	Jonas Bonn <jonas@southpole.se>
 M:	Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 M:	Stafford Horne <shorne@gmail.com>
+T:	git git://github.com/openrisc/linux.git
 L:	openrisc@lists.librecores.org
 W:	http://openrisc.io
 S:	Maintained

arch/openrisc/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -12,6 +12,7 @@ config OPENRISC
 	select HAVE_MEMBLOCK
 	select GPIOLIB
 	select HAVE_ARCH_TRACEHOOK
+	select SPARSE_IRQ
 	select GENERIC_IRQ_CHIP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW

arch/openrisc/TODO.openrisc

Lines changed: 0 additions & 1 deletion
@@ -10,4 +10,3 @@ that are due for investigation shortly, i.e. our TODO list:
    or1k and this change is slowly trickling through the stack. For the time
    being, or32 is equivalent to or1k.
 
-- Implement optimized version of memcpy and memset

arch/openrisc/include/asm/Kbuild

Lines changed: 1 addition & 4 deletions
@@ -1,7 +1,6 @@
 
 header-y += ucontext.h
 
-generic-y += atomic.h
 generic-y += auxvec.h
 generic-y += barrier.h
 generic-y += bitsperlong.h
@@ -10,8 +9,6 @@ generic-y += bugs.h
 generic-y += cacheflush.h
 generic-y += checksum.h
 generic-y += clkdev.h
-generic-y += cmpxchg-local.h
-generic-y += cmpxchg.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
@@ -22,12 +19,12 @@ generic-y += exec.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += ftrace.h
-generic-y += futex.h
 generic-y += hardirq.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
 generic-y += ipcbuf.h
+generic-y += irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kdebug.h

arch/openrisc/include/asm/atomic.h

Lines changed: 126 additions & 0 deletions
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
}

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return tmp;							\
}

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int tmp, old;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%2)	\n"			\
		"	l." #op " %1,%0,%3	\n"			\
		"	l.swa	0(%2),%1	\n"			\
		"	l.bnf	1b		\n"			\
		"	l.nop			\n"			\
		: "=&r"(old), "=&r"(tmp)				\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return old;							\
}

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_add_return	atomic_add_return
#define atomic_sub_return	atomic_sub_return
#define atomic_fetch_add	atomic_fetch_add
#define atomic_fetch_sub	atomic_fetch_sub
#define atomic_fetch_and	atomic_fetch_and
#define atomic_fetch_or		atomic_fetch_or
#define atomic_fetch_xor	atomic_fetch_xor
#define atomic_and		atomic_and
#define atomic_or		atomic_or
#define atomic_xor		atomic_xor

/*
 * Atomically add a to v->counter as long as v is not already u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero()
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int old, tmp;

	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%2)		\n"
		"	l.sfeq %0, %4		\n"
		"	l.bf 2f			\n"
		"	 l.add %1, %0, %3	\n"
		"	l.swa 0(%2), %1		\n"
		"	l.bnf 1b		\n"
		"	 l.nop			\n"
		"2:				\n"
		: "=&r"(old), "=&r" (tmp)
		: "r"(&v->counter), "r"(a), "r"(u)
		: "cc", "memory");

	return old;
}
#define __atomic_add_unless	__atomic_add_unless

#include <asm-generic/atomic.h>

#endif /* __ASM_OPENRISC_ATOMIC_H */
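This header defines only the word-sized l.lwa/l.swa loops and then pulls in asm-generic/atomic.h for everything it does not provide (atomic_read, atomic_set, atomic_inc, atomic_dec and friends). As the comment above notes, __atomic_add_unless() is what backs atomic_inc_not_zero(). A minimal usage sketch in kernel context; the "users" refcount and the example_* helpers are hypothetical, the atomic_* calls are the real API:

#include <linux/atomic.h>

static atomic_t users = ATOMIC_INIT(0);

/* Take a reference only if the object is still live (counter != 0).
 * atomic_inc_not_zero() is atomic_add_unless(v, 1, 0), which lands in
 * the __atomic_add_unless() routine defined above.
 */
static bool example_get(void)
{
	return atomic_inc_not_zero(&users);
}

static void example_put(void)
{
	atomic_dec(&users);	/* provided via asm-generic/atomic.h */
}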

arch/openrisc/include/asm/bitops.h

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
 
-#include <asm-generic/bitops/atomic.h>
+#include <asm/bitops/atomic.h>
 #include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic.h>
arch/openrisc/include/asm/bitops/atomic.h

Lines changed: 123 additions & 0 deletions
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_BITOPS_ATOMIC_H
#define __ASM_OPENRISC_BITOPS_ATOMIC_H

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%1)	\n"
		"	l.or	%0,%0,%2	\n"
		"	l.swa	0(%1),%0	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(tmp)
		: "r"(p), "r"(mask)
		: "cc", "memory");
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%1)	\n"
		"	l.and	%0,%0,%2	\n"
		"	l.swa	0(%1),%0	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(tmp)
		: "r"(p), "r"(~mask)
		: "cc", "memory");
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%1)	\n"
		"	l.xor	%0,%0,%2	\n"
		"	l.swa	0(%1),%0	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(tmp)
		: "r"(p), "r"(mask)
		: "cc", "memory");
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%2)	\n"
		"	l.or	%1,%0,%3	\n"
		"	l.swa	0(%2),%1	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(old), "=&r"(tmp)
		: "r"(p), "r"(mask)
		: "cc", "memory");

	return (old & mask) != 0;
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%2)	\n"
		"	l.and	%1,%0,%3	\n"
		"	l.swa	0(%2),%1	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(old), "=&r"(tmp)
		: "r"(p), "r"(~mask)
		: "cc", "memory");

	return (old & mask) != 0;
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%2)	\n"
		"	l.xor	%1,%0,%3	\n"
		"	l.swa	0(%2),%1	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(old), "=&r"(tmp)
		: "r"(p), "r"(mask)
		: "cc", "memory");

	return (old & mask) != 0;
}

#endif /* __ASM_OPENRISC_BITOPS_ATOMIC_H */
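These give OpenRISC native atomic bitops in place of the asm-generic fallbacks: each routine is the same reservation loop applied to one word of the bitmap, and the test_and_* variants keep the pre-update value in a second register so the caller learns the bit's old state. A minimal usage sketch in kernel context; MY_BUSY_BIT and my_flags are hypothetical, the bitop calls are the routines above:

#include <linux/bitops.h>

#define MY_BUSY_BIT	0		/* hypothetical flag bit */
static unsigned long my_flags;		/* hypothetical flag word */

static bool my_try_start(void)
{
	/* Old value 0 means we atomically set the bit and won the race. */
	return !test_and_set_bit(MY_BUSY_BIT, &my_flags);
}

static void my_finish(void)
{
	clear_bit(MY_BUSY_BIT, &my_flags);
}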

arch/openrisc/include/asm/cmpxchg.h

Lines changed: 83 additions & 0 deletions
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_CMPXCHG_H
#define __ASM_OPENRISC_CMPXCHG_H

#include <linux/types.h>

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	if (size != 4) {
		__cmpxchg_called_with_bad_pointer();
		return old;
	}

	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%1)		\n"
		"	l.sfeq %0, %2		\n"
		"	l.bnf 2f		\n"
		"	 l.nop			\n"
		"	l.swa 0(%1), %3		\n"
		"	l.bnf 1b		\n"
		"	 l.nop			\n"
		"2:				\n"
		: "=&r"(old)
		: "r"(ptr), "r"(old), "r"(new)
		: "cc", "memory");

	return old;
}

#define cmpxchg(ptr, o, n)						\
	({								\
		(__typeof__(*(ptr))) __cmpxchg((ptr),			\
					       (unsigned long)(o),	\
					       (unsigned long)(n),	\
					       sizeof(*(ptr)));		\
	})

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalidly-sized xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	if (size != 4) {
		__xchg_called_with_bad_pointer();
		return val;
	}

	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%1)		\n"
		"	l.swa 0(%1), %2		\n"
		"	l.bnf 1b		\n"
		"	 l.nop			\n"
		: "=&r"(val)
		: "r"(ptr), "r"(val)
		: "cc", "memory");

	return val;
}

#define xchg(ptr, with) \
	((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))

#endif /* __ASM_OPENRISC_CMPXCHG_H */
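Only 4-byte operands are supported here; any other size falls through to the deliberately undefined functions above and fails at link time. Typical use of cmpxchg() is a read-modify-write retry loop; a hedged sketch built on the macro above, with a hypothetical saturating counter:

/* Increment a counter but never past a ceiling, using the 32-bit
 * cmpxchg() defined above.  The names are made up for illustration.
 */
static int saturating_inc(volatile int *counter, int max)
{
	int old, new;

	do {
		old = *counter;
		if (old >= max)
			return old;	/* already saturated: no store */
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);

	return new;
}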
