Commit d550bbd (parent: e839ca5)

Disintegrate asm/system.h for Sparc

Signed-off-by: David Howells <dhowells@redhat.com>
cc: sparclinux@vger.kernel.org

84 files changed, +684 −669 lines
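The change is mechanical throughout: asm/system.h is split into single-purpose headers such as <asm/barrier.h> and <asm/cmpxchg.h>, and each user includes only what it actually needs. A minimal sketch of the caller-side migration (hypothetical file and function, not part of this commit):

/* Hypothetical example, not from this commit: a file that used to
 * pick up both barriers and cmpxchg() transitively from
 * <asm/system.h> now names the split headers explicitly.
 */
#include <linux/errno.h>
#include <asm/barrier.h>	/* mb(), rmb(), wmb(), smp_*() */
#include <asm/cmpxchg.h>	/* xchg(), cmpxchg() */

static int example_claim(int *flag)
{
	if (cmpxchg(flag, 0, 1) != 0)	/* atomically take ownership */
		return -EBUSY;
	smp_mb();	/* order all later accesses after the claim */
	return 0;
}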

arch/sparc/include/asm/atomic_32.h

Lines changed: 1 addition & 1 deletion

@@ -13,9 +13,9 @@
 
 #include <linux/types.h>
 
+#include <asm/cmpxchg.h>
 #include <asm-generic/atomic64.h>
 
-#include <asm/system.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 

arch/sparc/include/asm/atomic_64.h

Lines changed: 1 addition & 2 deletions

@@ -8,7 +8,7 @@
 #define __ARCH_SPARC64_ATOMIC__
 
 #include <linux/types.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
@@ -85,7 +85,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
 #define atomic64_cmpxchg(v, o, n) \
 	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
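The atomic64_cmpxchg()/atomic64_xchg() macros kept here are the building blocks for the usual compare-and-swap retry loop, as __atomic_add_unless() above does for 32-bit values. A minimal sketch of the same idiom for 64-bit counters, assuming the standard atomic64_read() accessor (the helper itself is hypothetical, not part of this commit):

static inline int atomic64_add_unless_sketch(atomic64_t *v, long a, long u)
{
	long c = atomic64_read(v);

	while (c != u) {
		long old = atomic64_cmpxchg(v, c, c + a);
		if (old == c)
			return 1;	/* our value was installed */
		c = old;		/* lost the race; retry with fresh value */
	}
	return 0;			/* counter already held u; nothing done */
}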

arch/sparc/include/asm/auxio_32.h

Lines changed: 0 additions & 1 deletion

@@ -6,7 +6,6 @@
 #ifndef _SPARC_AUXIO_H
 #define _SPARC_AUXIO_H
 
-#include <asm/system.h>
 #include <asm/vaddrs.h>
 
 /* This register is an unsigned char in IO space.  It does two things.

arch/sparc/include/asm/barrier.h

Lines changed: 8 additions & 0 deletions

@@ -0,0 +1,8 @@
+#ifndef ___ASM_SPARC_BARRIER_H
+#define ___ASM_SPARC_BARRIER_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm/barrier_64.h>
+#else
+#include <asm/barrier_32.h>
+#endif
+#endif

arch/sparc/include/asm/barrier_32.h

Lines changed: 15 additions & 0 deletions

@@ -0,0 +1,15 @@
+#ifndef __SPARC_BARRIER_H
+#define __SPARC_BARRIER_H
+
+/* XXX Change this if we ever use a PSO mode kernel. */
+#define mb()	__asm__ __volatile__ ("" : : : "memory")
+#define rmb()	mb()
+#define wmb()	mb()
+#define read_barrier_depends()	do { } while(0)
+#define set_mb(__var, __value)  do { __var = __value; mb(); } while(0)
+#define smp_mb()	__asm__ __volatile__("":::"memory")
+#define smp_rmb()	__asm__ __volatile__("":::"memory")
+#define smp_wmb()	__asm__ __volatile__("":::"memory")
+#define smp_read_barrier_depends()	do { } while(0)
+
+#endif /* !(__SPARC_BARRIER_H) */
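
All of these 32-bit definitions compile to pure compiler barriers, since per the XXX comment the port never runs PSO mode kernels; the calls still constrain the compiler and document the intended ordering. A hedged producer/consumer sketch using them (hypothetical functions, not from this commit):

static int data, ready;

static void producer(void)
{
	data = 42;
	smp_wmb();		/* publish data before the ready flag */
	ready = 1;
}

static int consumer(void)
{
	if (!ready)
		return -1;
	smp_rmb();		/* observe the flag before reading data */
	return data;
}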

arch/sparc/include/asm/barrier_64.h

Lines changed: 56 additions & 0 deletions

@@ -0,0 +1,56 @@
+#ifndef __SPARC64_BARRIER_H
+#define __SPARC64_BARRIER_H
+
+/* These are here in an effort to more fully work around Spitfire Errata
+ * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
+ * branch, the chip can stop executing instructions until a trap occurs.
+ * Therefore, if interrupts are disabled, the chip can hang forever.
+ *
+ * It used to be believed that the memory barrier had to be right in the
+ * delay slot, but a case has been traced recently wherein the memory barrier
+ * was one instruction after the branch delay slot and the chip still hung.
+ * The offending sequence was the following in sym_wakeup_done() of the
+ * sym53c8xx_2 driver:
+ *
+ *	call	sym_ccb_from_dsa, 0
+ *	 movge	%icc, 0, %l0
+ *	brz,pn	%o0, .LL1303
+ *	 mov	%o0, %l2
+ *	membar	#LoadLoad
+ *
+ * The branch has to be mispredicted for the bug to occur.  Therefore, we put
+ * the memory barrier explicitly into a "branch always, predicted taken"
+ * delay slot to avoid the problem case.
+ */
+#define membar_safe(type) \
+do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
+			     " membar	" type "\n" \
+			     "1:\n" \
+			     : : : "memory"); \
+} while (0)
+
+/* The kernel always executes in TSO memory model these days,
+ * and furthermore most sparc64 chips implement more stringent
+ * memory ordering than required by the specifications.
+ */
+#define mb()	membar_safe("#StoreLoad")
+#define rmb()	__asm__ __volatile__("":::"memory")
+#define wmb()	__asm__ __volatile__("":::"memory")
+
+#define read_barrier_depends()	do { } while(0)
+#define set_mb(__var, __value) \
+	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#else
+#define smp_mb()	__asm__ __volatile__("":::"memory")
+#define smp_rmb()	__asm__ __volatile__("":::"memory")
+#define smp_wmb()	__asm__ __volatile__("":::"memory")
+#endif
+
+#define smp_read_barrier_depends()	do { } while(0)
+
+#endif /* !(__SPARC64_BARRIER_H) */
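
Under TSO the only hardware reordering is a store passing a later load, which is why mb() above is the one definition that emits a membar (#StoreLoad, wrapped in membar_safe() to dodge the errata) while rmb() and wmb() are plain compiler barriers. The classic case that needs the full mb() is a Dekker-style handshake; a minimal sketch (hypothetical code, not from this commit):

static int flag0, flag1;

static int try_enter_cpu0(void)
{
	flag0 = 1;
	mb();			/* #StoreLoad: the store to flag0 must be
				 * visible before flag1 is sampled */
	return flag1 == 0;	/* enter only if cpu1 is not also trying */
}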

arch/sparc/include/asm/bug.h

Lines changed: 3 additions & 0 deletions

@@ -19,4 +19,7 @@ extern void do_BUG(const char *file, int line);
 
 #include <asm-generic/bug.h>
 
+struct pt_regs;
+extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
+
 #endif

arch/sparc/include/asm/cacheflush_32.h

Lines changed: 9 additions & 0 deletions

@@ -83,4 +83,13 @@ extern void sparc_flush_page_to_ram(struct page *page);
 #define flush_cache_vmap(start, end)	flush_cache_all()
 #define flush_cache_vunmap(start, end)	flush_cache_all()
 
+/* When a context switch happens we must flush all user windows so that
+ * the windows of the current process are flushed onto its stack. This
+ * way the windows are all clean for the next process and the stack
+ * frames are up to date.
+ */
+extern void flush_user_windows(void);
+extern void kill_user_windows(void);
+extern void flushw_all(void);
+
 #endif /* _SPARC_CACHEFLUSH_H */
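
The comment explains why: on SPARC, the newest stack frames may exist only in the CPU's register windows, so any code that reads the current process's user stack must spill the windows to memory first. A hedged sketch of such a caller (hypothetical function, not part of the commit):

#include <linux/uaccess.h>

static unsigned long peek_own_user_stack(unsigned long __user *sp)
{
	unsigned long val;

	flush_user_windows();	/* spill cached windows onto the user stack */
	if (get_user(val, sp))	/* the in-memory frame is now current */
		return 0;
	return val;
}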

arch/sparc/include/asm/cacheflush_64.h

Lines changed: 10 additions & 0 deletions

@@ -9,6 +9,16 @@
 
 /* Cache flush operations. */
 
+
+#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
+#define flushw_all()	__asm__ __volatile__("flushw")
+
+extern void __flushw_user(void);
+#define flushw_user() __flushw_user()
+
+#define flush_user_windows flushw_user
+#define flush_register_windows flushw_all
+
 /* These are the same regardless of whether this is an SMP kernel or not. */
 #define flush_cache_mm(__mm) \
 do { if ((__mm) == current->mm) flushw_user(); } while(0)
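
flushi() wraps the SPARC flush instruction, which brings the instruction-fetch path in sync with a just-stored instruction word, and flushw_all() spills every register window via flushw. A sketch of the classic flushi() use in code patching (hypothetical helper, not from this commit):

#include <linux/types.h>

static void patch_one_insn(u32 *insn, u32 new_insn)
{
	*insn = new_insn;	/* store the new instruction word */
	flushi(insn);		/* synchronize the I-cache with the store */
}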

arch/sparc/include/asm/cmpxchg.h

Lines changed: 8 additions & 0 deletions

@@ -0,0 +1,8 @@
+#ifndef ___ASM_SPARC_CMPXCHG_H
+#define ___ASM_SPARC_CMPXCHG_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm/cmpxchg_64.h>
+#else
+#include <asm/cmpxchg_32.h>
+#endif
+#endif
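
Like barrier.h above, this is a pure dispatch header. The usual caller-side idiom the header enables, sketched as a hypothetical lock-free running maximum (not part of this commit):

#include <asm/cmpxchg.h>

static void update_max(unsigned long *max, unsigned long val)
{
	unsigned long old = *max;

	while (val > old) {
		unsigned long prev = cmpxchg(max, old, val);
		if (prev == old)
			break;		/* val is now the recorded maximum */
		old = prev;		/* raced; re-check against newer value */
	}
}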
