9 | 9 | #ifndef __ASM_BARRIER_H |
10 | 10 | #define __ASM_BARRIER_H |
11 | 11 |
12 | | -#ifdef CONFIG_SMP |
13 | | - |
14 | 12 | #ifdef CONFIG_ISA_ARCV2 |
15 | 13 |
16 | | -/* DMB + SYNC semantics */ |
17 | | -#define mb() asm volatile("dsync\n": : : "memory") |
| 14 | +/* |
| 15 | + * ARCv2 based HS38 cores are in-order issue, but still weakly ordered |
| 16 | + * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ... |
| 17 | + * Explicit barrier provided by DMB instruction with operand supporting |
| 18 | + * load/store/load+store semantics |
| 19 | + * |
| 20 | + * - DMB guarantees SMP as well as local barrier semantics |
| 21 | + * (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e. |
| 22 | + * UP: barrier(), SMP: smp_*mb == *mb) |
| 23 | + * - DSYNC provides DMB+completion_of_cache_bpu_ops hence not needed |
| 24 | + * in the general case. Plus it only provides full barrier. |
| 25 | + */ |
18 | 26 |
19 | | -#define smp_mb() asm volatile("dmb 3\n": : : "memory") |
20 | | -#define smp_rmb() asm volatile("dmb 1\n": : : "memory") |
21 | | -#define smp_wmb() asm volatile("dmb 2\n": : : "memory") |
| 27 | +#define mb() asm volatile("dmb 3\n": : : "memory") |
| 28 | +#define rmb() asm volatile("dmb 1\n": : : "memory") |
| 29 | +#define wmb() asm volatile("dmb 2\n": : : "memory") |
22 | 30 |
23 | | -#else /* CONFIG_ISA_ARCOMPACT */ |
| 31 | +#endif |
24 | 32 |
25 | | -#define mb() asm volatile("sync \n" : : : "memory") |
| 33 | +#ifdef CONFIG_ISA_ARCOMPACT |
26 | 34 |
27 | | -#endif /* CONFIG_ISA_ARCV2 */ |
| 35 | +/* |
| 36 | + * ARCompact based cores (ARC700) only have the SYNC instruction, which is |
| 37 | + * super heavyweight as it flushes the pipeline as well. |
| 38 | + * There are no real SMP implementations of such cores. |
| 39 | + */ |
28 | 40 |
29 | | -#endif /* CONFIG_SMP */ |
| 41 | +#define mb() asm volatile("sync \n" : : : "memory") |
| 42 | +#endif |
30 | 43 |
31 | 44 | #include <asm-generic/barrier.h> |
32 | 45 |
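The ARCv2 comment block above relies on the generic fallback in include/asm-generic/barrier.h, which is pulled in at the end of this header: when an architecture defines only mb()/rmb()/wmb(), the smp_* variants are derived from them. Below is a paraphrased sketch of that fallback logic, assuming the generic header of this era behaves as the comment describes ("UP: barrier(), SMP: smp_*mb == *mb"); it is not a verbatim copy of the header.

/*
 * Paraphrased sketch of the asm-generic/barrier.h fallback referred to in
 * the comment above: smp_*mb() fall back to the full *mb() on SMP builds
 * and to a plain compiler barrier() on UP builds.
 */
#ifdef CONFIG_SMP
#ifndef smp_mb
#define smp_mb()	mb()
#endif
#ifndef smp_rmb
#define smp_rmb()	rmb()
#endif
#ifndef smp_wmb
#define smp_wmb()	wmb()
#endif
#else /* !CONFIG_SMP */
#ifndef smp_mb
#define smp_mb()	barrier()
#endif
#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif
#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif
#endif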
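To show how the new definitions are meant to be used, here is a minimal, hypothetical producer/consumer sketch; the flag/data variables and the two functions are illustration only and not part of the patch. On an ARCv2 SMP build, smp_wmb() and smp_rmb() below resolve to "dmb 2" and "dmb 1" via the macros added above.

#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */
#include <asm/barrier.h>	/* smp_wmb() / smp_rmb() */

/* Hypothetical illustration, not part of the patch. */
static int data;
static int flag;

static void producer(void)
{
	data = 42;
	smp_wmb();		/* order the store to data before the store to flag */
	WRITE_ONCE(flag, 1);
}

static int consumer(void)
{
	if (READ_ONCE(flag)) {
		smp_rmb();	/* pairs with smp_wmb() in producer() */
		return data;	/* guaranteed to observe 42 */
	}
	return 0;
}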