@@ -12,68 +12,53 @@
#include <asm/barrier.h>
#include <linux/atomic.h>

-/*
- * HP-PARISC specific bit operations
- * for a detailed description of the functions please refer
- * to include/asm-i386/bitops.h or kerneldoc
- */
-
-#if __BITS_PER_LONG == 64
-#define SHIFT_PER_LONG 6
-#else
-#define SHIFT_PER_LONG 5
-#endif
-
-#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
-
-
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on use of volatile and __*_bit() (set/clear/change):
 * *_bit() want use of volatile.
 * __*_bit() are "relaxed" and don't use spinlock or volatile.
 */

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
	unsigned long flags;

-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
-	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
+	unsigned long mask = BIT_MASK(nr);
	unsigned long flags;

-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
-	*addr &= mask;
+	*addr &= ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
	unsigned long flags;

-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
	unsigned long old;
	unsigned long flags;
	int set;

-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
@@ -86,12 +71,12 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
	unsigned long old;
	unsigned long flags;
	int set;

-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
@@ -104,11 +89,11 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
	unsigned long oldbit;
	unsigned long flags;

-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit ^ mask;
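
Taken together, the patch is a mechanical conversion: every open-coded 1UL << CHOP_SHIFTCOUNT(nr) becomes the generic BIT_MASK(nr), every nr >> SHIFT_PER_LONG word-offset computation becomes BIT_WORD(nr), and clear_bit() now inverts the mask at the use site (*addr &= ~mask) instead of storing a pre-inverted mask. As a sanity check, here is a minimal standalone sketch, not kernel code, with BIT_MASK/BIT_WORD restated roughly as include/linux/bits.h defines them, confirming both formulations pick the same mask and word index:

/*
 * Standalone userspace sketch: verify that the generic helpers compute
 * the same values as the PA-RISC macros this patch removes.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)

/* Old PA-RISC helpers, as deleted by this patch. */
#define SHIFT_PER_LONG	(BITS_PER_LONG == 64 ? 6 : 5)
#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))

/* Generic helpers, restated from include/linux/bits.h. */
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

int main(void)
{
	for (int nr = 0; nr < 4 * (int)BITS_PER_LONG; nr++) {
		/* nr & (2^k - 1) == nr % 2^k for non-negative nr ... */
		assert(BIT_MASK(nr) == 1UL << CHOP_SHIFTCOUNT(nr));
		/* ... and nr >> k == nr / 2^k, so the word index matches too. */
		assert((unsigned long)BIT_WORD(nr) ==
		       (unsigned long)(nr >> SHIFT_PER_LONG));
	}
	printf("BIT_MASK/BIT_WORD match the removed PA-RISC macros\n");
	return 0;
}

The equivalence relies only on BITS_PER_LONG being a power of two, so the masking and shifting forms agree for every non-negative bit index, which is why the patch changes no behavior.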