Linux Kernel 3.7.1
atomic.h
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }

/* Plain assignment; a single word store is atomic on this architecture. */
#define atomic_set(v, i) ((v)->counter = (i))

/*
 * atomic_read - read the value, atomically
 * @v: pointer to atomic value
 *
 * Relies on word loads being atomic on this architecture.
 */
#define atomic_read(v) ((v)->counter)
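
/*
 * Usage sketch (illustrative, not part of the original file): a counter
 * can be initialized statically or at runtime and then read without any
 * locking, e.g.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 5);
 *	if (atomic_read(&nr_events) > 0)
 *		...
 */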

/*
 * atomic_xchg - atomically replace the value, returning the old value
 * @v: pointer to atomic value
 * @new: value to store
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))

/*
 * atomic_cmpxchg - atomic compare-and-exchange
 * @v: pointer to atomic value
 * @old: expected old value
 * @new: value to store if *v still equals @old
 *
 * Returns the value actually observed in *v (equal to @old on success).
 * Built on the load-locked/store-conditional pair that memw_locked
 * provides; the store fails, and we retry, if another CPU intervened.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,P0) = %3;\n"
		"	if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
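
/*
 * Illustrative sketch (not from the original file): atomic_cmpxchg() is
 * the usual building block for arbitrary read-modify-write loops. For
 * example, a hypothetical saturating increment retries until no other
 * CPU raced between the read and the exchange:
 *
 *	static inline void atomic_inc_saturating(atomic_t *v, int max)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old == max)
 *				return;
 *			new = old + 1;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *	}
 */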

/*
 * atomic_add_return - add to the value, returning the new value
 * @i: amount to add
 * @v: pointer to atomic value
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = add(%0,%2);\n"
		"	memw_locked(%1,P3) = %0;\n"
		"	if !P3 jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
	return output;
}

#define atomic_add(i, v) atomic_add_return(i, (v))
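
/*
 * Sketch (illustrative): atomic_add() simply discards the result that
 * atomic_add_return() hands back, so use the latter when the post-add
 * value matters, e.g.
 *
 *	atomic_add(10, &v);			v += 10, result ignored
 *	cur = atomic_add_return(10, &v);	v += 10, cur is the new value
 */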

/*
 * atomic_sub_return - subtract from the value, returning the new value
 * @i: amount to subtract
 * @v: pointer to atomic value
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = sub(%0,%2);\n"
		"	memw_locked(%1,P3) = %0;\n"
		"	if !P3 jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
	return output;
}

#define atomic_sub(i, v) atomic_sub_return(i, (v))

/*
 * __atomic_add_unless - add @a to @v, unless @v currently holds @u
 * @v: pointer to atomic value
 * @a: amount to add
 * @u: do not add if the current value equals this
 *
 * Note the return convention of this implementation: it yields 1 if the
 * add was performed and 0 if the value already equalled @u, rather than
 * the old value that the generic atomic_add_unless() wrapper expects.
 * That keeps atomic_inc_not_zero() (where @u is 0) correct, but the
 * result of atomic_add_unless() with a nonzero @u should not be relied
 * upon.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int output, __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%0 = add(%0, %3);"
		"		%1 = #0;"
		"	}"
		"	memw_locked(%2, p3) = %0;"
		"	{"
		"		if !p3 jump 1b;"
		"		%1 = #1;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (output)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return output;
}
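
/*
 * Usage sketch (illustrative): __atomic_add_unless() backs the
 * atomic_inc_not_zero() helper below, the classic "take a reference only
 * while the object is still live" idiom, e.g. for a hypothetical
 * refcounted object:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	object already on its way out
 */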

/* atomic_add_unless() is layered over __atomic_add_unless() by <linux/atomic.h>. */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
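
/*
 * Sketch (illustrative): atomic_dec_and_test() returns true only for the
 * caller that takes the count to zero, so exactly one CPU runs the
 * teardown path, e.g. for a hypothetical refcounted object:
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		kfree(obj);
 */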

#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

#endif /* _ASM_ATOMIC_H */