Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
bitops.h
Go to the documentation of this file.
1 /* MN10300 bit operations
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells ([email protected])
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public Licence
8  * as published by the Free Software Foundation; either version
9  * 2 of the Licence, or (at your option) any later version.
10  *
11  * These have to be done with inline assembly: that way the bit-setting
12  * is guaranteed to be atomic. All bit operations return 0 if the bit
13  * was cleared before the operation and != 0 if it was not.
14  *
15  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
16  */
17 #ifndef __ASM_BITOPS_H
18 #define __ASM_BITOPS_H
19 
20 #include <asm/cpu-regs.h>
21 
/* Ordering hooks that callers place either side of clear_bit(); only a
 * compiler barrier is emitted here.  NOTE(review): presumably no hardware
 * memory barrier is required around clear_bit() on this arch - confirm. */
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
24 
/*
 * set bit
 *
 * Set bit @nr in the bitmap at @addr.  The byte holding the bit is
 * addressed directly ((nr) >> 3 bytes past @addr, bit (nr) & 7 within
 * that byte), and the MN10300 "bset" instruction performs the
 * read-modify-write in a single instruction - which is what makes this
 * atomic, per the header comment above.
 */
#define __set_bit(nr, addr)					\
({								\
	volatile unsigned char *_a = (unsigned char *)(addr);	\
	const unsigned shift = (nr) & 7;			\
	_a += (nr) >> 3;					\
								\
	asm volatile("bset %2,(%1) # set_bit reg"		\
		     : "=m"(*_a)				\
		     : "a"(_a), "d"(1 << shift), "m"(*_a)	\
		     : "memory", "cc");				\
})

/* The atomic and non-atomic set operations are the same thing here. */
#define set_bit(nr, addr) __set_bit((nr), (addr))
41 
/*
 * clear bit
 *
 * Clear bit @nr in the bitmap at @addr using the "bclr" instruction on
 * the byte that holds the bit.  Triple-underscored because the plain
 * __clear_bit() name is taken by the non-atomic C version below.
 */
#define ___clear_bit(nr, addr)					\
({								\
	volatile unsigned char *_a = (unsigned char *)(addr);	\
	const unsigned shift = (nr) & 7;			\
	_a += (nr) >> 3;					\
								\
	asm volatile("bclr %2,(%1) # clear_bit reg"		\
		     : "=m"(*_a)				\
		     : "a"(_a), "d"(1 << shift), "m"(*_a)	\
		     : "memory", "cc");				\
})

/* Atomic clear: maps straight onto the asm helper above. */
#define clear_bit(nr, addr) ___clear_bit((nr), (addr))
58 
59 
/*
 * Non-atomic clear_bit: clear bit @nr in the word-array bitmap at @addr
 * with a plain C read-modify-write (no locking, no asm).
 */
static inline void __clear_bit(unsigned long nr, volatile void *addr)
{
	/* keep the volatile qualifier rather than casting it away */
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned int mask;

	a += nr >> 5;			/* word containing the bit */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	*a &= ~mask;
}
69 
/*
 * test bit
 *
 * Return 1 if bit @nr of the bitmap at @addr is set, 0 otherwise.
 */
static inline int test_bit(unsigned long nr, const volatile void *addr)
{
	const volatile unsigned int *word;

	word = (const volatile unsigned int *) addr + (nr >> 5);
	return (*word >> (nr & 31)) & 1;
}
77 
/*
 * change bit
 *
 * Non-atomic toggle of bit @nr in the word-array bitmap at @addr.
 */
static inline void __change_bit(unsigned long nr, volatile void *addr)
{
	unsigned int mask;
	/* keep the volatile qualifier rather than casting it away */
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	*a ^= mask;
}
90 
91 extern void change_bit(unsigned long nr, volatile void *addr);
92 
/*
 * test and set bit
 *
 * Atomically set bit @nr in the bitmap at @addr.  The result is
 * non-zero if the bit was already set beforehand and zero if it was
 * clear, as the header comment above specifies for all bit ops.
 *
 * The old bit value is recovered by copying EPSW out immediately after
 * the "bset" and checking its Z flag: Z clear means the tested bit was
 * already set.
 */
#define __test_and_set_bit(nr, addr)				\
({								\
	volatile unsigned char *_a = (unsigned char *)(addr);	\
	const unsigned shift = (nr) & 7;			\
	unsigned epsw;						\
	_a += (nr) >> 3;					\
								\
	asm volatile("bset %3,(%2) # test_set_bit reg\n"	\
		     "mov epsw,%1"				\
		     : "=m"(*_a), "=d"(epsw)			\
		     : "a"(_a), "d"(1 << shift), "m"(*_a)	\
		     : "memory", "cc");				\
								\
	!(epsw & EPSW_FLAG_Z);					\
})

/* The atomic and non-atomic forms are the same operation here. */
#define test_and_set_bit(nr, addr) __test_and_set_bit((nr), (addr))
113 
/*
 * test and clear bit
 *
 * Atomically clear bit @nr in the bitmap at @addr.  Returns non-zero if
 * the bit was set before the operation and zero if it was already clear
 * (same return convention as all the bit ops in this file).
 *
 * As with __test_and_set_bit(), the old value is read back via the Z
 * flag of EPSW captured straight after the "bclr".
 */
#define __test_and_clear_bit(nr, addr)				\
({								\
	volatile unsigned char *_a = (unsigned char *)(addr);	\
	const unsigned shift = (nr) & 7;			\
	unsigned epsw;						\
	_a += (nr) >> 3;					\
								\
	asm volatile("bclr %3,(%2) # test_clear_bit reg\n"	\
		     "mov epsw,%1"				\
		     : "=m"(*_a), "=d"(epsw)			\
		     : "a"(_a), "d"(1 << shift), "m"(*_a)	\
		     : "memory", "cc");				\
								\
	!(epsw & EPSW_FLAG_Z);					\
})

/* The atomic and non-atomic forms are the same operation here. */
#define test_and_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr))
134 
/*
 * test and change bit
 *
 * Non-atomic: toggle bit @nr in the word-array bitmap at @addr and
 * return the bit's previous value (1 if it was set, 0 if clear).
 */
static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
{
	unsigned int mask;
	int retval;
	/* keep the volatile qualifier rather than casting it away */
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}
150 
151 extern int test_and_change_bit(unsigned long nr, volatile void *addr);
152 
153 #include <asm-generic/bitops/lock.h>
154 
155 #ifdef __KERNEL__
156 
/*
 * __ffs - find the index (0-based) of the least significant set bit in @x
 *
 * (x & -x) reduces @x to a value with only its lowest set bit still set;
 * the "bsch" instruction then locates that single bit, with the scan
 * seed 0 loaded into the result register via the "0"(0) tie.
 *
 * NOTE(review): as with the generic __ffs(), the result for x == 0 is
 * presumably undefined - confirm against the bsch instruction spec.
 */
static inline __attribute__((const))
unsigned long __ffs(unsigned long x)
{
	int bit;
	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x) : "cc");
	return bit;
}
171 
/*
 * special slimline version of fls() for calculating ilog2_u32()
 * - note: no protection against n == 0
 *
 * "bsch" is relied on to yield the position of the top set bit of @n
 * (that is what makes this an ilog2); the "0"(0) ties the scan seed to
 * the output register.  NOTE(review): exact bsch scan semantics are
 * taken on trust from the fls()/ilog2 usage - confirm against the
 * MN10300 ISA manual.
 */
static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
	int bit;
	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n) : "cc");
	return bit;
}
183 
/*
 * fls - find last (most significant) bit set
 * @x: the word to search
 *
 * Returns the 1-based position of the highest set bit, or 0 when no
 * bits are set (so fls(0) == 0, fls(1) == 1).
 */
static inline __attribute__((const))
int fls(int x)
{
	if (x == 0)
		return 0;
	return __ilog2_u32(x) + 1;
}
197 
/*
 * __fls - find the index (0-based) of the most significant set bit
 * @word: the word to search
 *
 * Thin wrapper over __ilog2_u32(); as noted there, there is no
 * protection against word == 0.
 */
static inline unsigned long __fls(unsigned long word)
{
	return __ilog2_u32(word);
}
208 
/*
 * ffs - find first (least significant) bit set
 * @x: the word to search
 *
 * Returns the 1-based position of the lowest set bit, or 0 if @x is 0
 * (the traditional libc ffs() contract), built on top of fls().
 */
static inline __attribute__((const))
int ffs(int x)
{
	/* (u & -u) gives a mask that is just the least significant
	 * set bit of x.  The negation is done in unsigned arithmetic
	 * because -x overflows - undefined behaviour - when
	 * x == INT_MIN; the resulting mask is identical for every
	 * other value.
	 */
	unsigned int u = (unsigned int) x;

	return fls((int)(u & -u));
}
224 
225 #include <asm-generic/bitops/ffz.h>
227 #include <asm-generic/bitops/find.h>
231 #include <asm-generic/bitops/le.h>
232 
233 #endif /* __KERNEL__ */
234 #endif /* __ASM_BITOPS_H */