Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
bitops_64.h
Go to the documentation of this file.
1 /*
2  * Copyright 2011 Tilera Corporation. All Rights Reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation, version 2.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11  * NON INFRINGEMENT. See the GNU General Public License for
12  * more details.
13  */
14 
15 #ifndef _ASM_TILE_BITOPS_64_H
16 #define _ASM_TILE_BITOPS_64_H
17 
18 #include <linux/compiler.h>
19 #include <linux/atomic.h>
20 
21 /* See <asm/bitops.h> for API comments. */
22 
23 static inline void set_bit(unsigned nr, volatile unsigned long *addr)
24 {
25  unsigned long mask = (1UL << (nr % BITS_PER_LONG));
26  __insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
27 }
28 
29 static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
30 {
31  unsigned long mask = (1UL << (nr % BITS_PER_LONG));
32  __insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
33 }
34 
/*
 * Ordering helpers around clear_bit(): both are defined as full
 * memory barriers on this architecture.
 */
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
37 
38 
39 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
40 {
41  unsigned long mask = (1UL << (nr % BITS_PER_LONG));
42  unsigned long guess, oldval;
43  addr += nr / BITS_PER_LONG;
44  oldval = *addr;
45  do {
46  guess = oldval;
47  oldval = atomic64_cmpxchg((atomic64_t *)addr,
48  guess, guess ^ mask);
49  } while (guess != oldval);
50 }
51 
52 
53 /*
54  * The test_and_xxx_bit() routines require a memory fence before we
55  * start the operation, and after the operation completes. We use
56  * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
57  * barrier(), to block until the atomic op is complete.
58  */
59 
60 static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
61 {
62  int val;
63  unsigned long mask = (1UL << (nr % BITS_PER_LONG));
64  smp_mb(); /* barrier for proper semantics */
65  val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
66  & mask) != 0;
67  barrier();
68  return val;
69 }
70 
71 
72 static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
73 {
74  int val;
75  unsigned long mask = (1UL << (nr % BITS_PER_LONG));
76  smp_mb(); /* barrier for proper semantics */
77  val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
78  & mask) != 0;
79  barrier();
80  return val;
81 }
82 
83 
84 static inline int test_and_change_bit(unsigned nr,
85  volatile unsigned long *addr)
86 {
87  unsigned long mask = (1UL << (nr % BITS_PER_LONG));
88  unsigned long guess, oldval;
89  addr += nr / BITS_PER_LONG;
90  oldval = *addr;
91  do {
92  guess = oldval;
93  oldval = atomic64_cmpxchg((atomic64_t *)addr,
94  guess, guess ^ mask);
95  } while (guess != oldval);
96  return (oldval & mask) != 0;
97 }
98 
100 
101 #endif /* _ASM_TILE_BITOPS_64_H */