Linux Kernel 3.7.1
spinlock_32.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <arch/spr_def.h>

#include "spinlock_common.h"

void arch_spin_lock(arch_spinlock_t *lock)
{
        int my_ticket;
        int iterations = 0;
        int delta;

        /* Spin until we own the tns slot (an odd value means it's busy). */
        while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
                delay_backoff(iterations++);

        /* Increment the next ticket number, implicitly releasing tns lock. */
        lock->next_ticket = my_ticket + TICKET_QUANTUM;

        /* Wait until it's our turn. */
        while ((delta = my_ticket - lock->current_ticket) != 0)
                relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
}
EXPORT_SYMBOL(arch_spin_lock);

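The function above is a ticket lock built on the Tile "tns" (test-and-set) instruction, which atomically reads a 32-bit word and stores 1 into it, so an odd value always means another tns is in flight. Below is a rough portable sketch of the same scheme using C11 atomics; mock_tns, demo_spinlock, and DEMO_TICKET_QUANTUM are illustrative stand-ins, not the real arch types (the real TICKET_QUANTUM comes from the arch headers).

#include <stdatomic.h>

#define DEMO_TICKET_QUANTUM 2   /* assumed: tickets step by 2, keeping bit 0 free */

struct demo_spinlock {
        _Atomic int next_ticket;        /* next ticket to hand out; bit 0 = tns marker */
        _Atomic int current_ticket;     /* ticket currently allowed to hold the lock */
};

/* Stand-in for __insn_tns: atomically fetch the word and leave 1 behind. */
static int mock_tns(_Atomic int *p)
{
        return atomic_exchange(p, 1);
}

static void demo_spin_lock(struct demo_spinlock *lock)
{
        int my_ticket;

        /* An odd result means another cpu's tns is in flight; retry. */
        while ((my_ticket = mock_tns(&lock->next_ticket)) & 1)
                ;
        /* Storing the bumped (even) ticket clears bit 0, releasing the tns. */
        atomic_store(&lock->next_ticket, my_ticket + DEMO_TICKET_QUANTUM);
        /* Wait for our turn. */
        while (atomic_load(&lock->current_ticket) != my_ticket)
                ;
}

static void demo_spin_unlock(struct demo_spinlock *lock)
{
        atomic_fetch_add(&lock->current_ticket, DEMO_TICKET_QUANTUM);
}
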
int arch_spin_trylock(arch_spinlock_t *lock)
{
        /*
         * Grab a ticket; no need to retry if it's busy, we'll just
         * treat that the same as "locked", since someone else
         * will lock it momentarily anyway.
         */
        int my_ticket = __insn_tns((void *)&lock->next_ticket);

        if (my_ticket == lock->current_ticket) {
                /* Not currently locked, so lock it by keeping this ticket. */
                lock->next_ticket = my_ticket + TICKET_QUANTUM;
                /* Success! */
                return 1;
        }

        if (!(my_ticket & 1)) {
                /* Release next_ticket. */
                lock->next_ticket = my_ticket;
        }

        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock);

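Under the same demo model, the trylock path can be restated as follows. The key detail is that an odd value returned by the tns means we clobbered another cpu's in-flight tns marker rather than a real ticket, so there is nothing to restore.

static int demo_spin_trylock(struct demo_spinlock *lock)
{
        int my_ticket = mock_tns(&lock->next_ticket);

        if (my_ticket == atomic_load(&lock->current_ticket)) {
                /* Uncontended: keep the ticket, i.e. take the lock. */
                atomic_store(&lock->next_ticket,
                             my_ticket + DEMO_TICKET_QUANTUM);
                return 1;
        }
        if (!(my_ticket & 1)) {
                /* We clobbered a real ticket value; put it back. */
                atomic_store(&lock->next_ticket, my_ticket);
        }
        return 0;       /* treat "tns in flight" the same as "locked" */
}
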
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        u32 iterations = 0;
        while (arch_spin_is_locked(lock))
                delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

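delay_backoff() comes from spinlock_common.h and is not shown in this listing. The sketch below captures only the general shape, capped exponential backoff; the base, cap, and busy-wait body are illustrative assumptions, not the real implementation.

/* Illustrative capped exponential backoff; constants are assumptions. */
static void demo_backoff(int iterations)
{
        int exponent = iterations < 16 ? iterations : 16;
        unsigned long loops = 1ul << exponent;

        while (loops--)
                __asm__ volatile ("" ::: "memory");     /* burn a beat */
}
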
/*
 * The low byte is always reserved to be the marker for a "tns" operation
 * since the low bit is set to "1" by a tns.  The next seven bits are
 * zeroes.  The next byte holds the "next" writer value, i.e. the ticket
 * available for the next task that wants to write.  The third byte holds
 * the current writer value, i.e. the writer who holds the current ticket.
 * If current == next == 0, there are no interested writers.
 */
#define WR_NEXT_SHIFT   _WR_NEXT_SHIFT
#define WR_CURR_SHIFT   _WR_CURR_SHIFT
#define WR_WIDTH        _WR_WIDTH
#define WR_MASK         ((1 << WR_WIDTH) - 1)

/*
 * The last eight bits hold the active reader count.  This has to be
 * zero before a writer can start to write.
 */
#define RD_COUNT_SHIFT  _RD_COUNT_SHIFT
#define RD_COUNT_WIDTH  _RD_COUNT_WIDTH
#define RD_COUNT_MASK   ((1 << RD_COUNT_WIDTH) - 1)


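Putting the two comments together, the rwlock is one 32-bit word carved into byte-wide fields. The demo below decodes such a word; the DEMO_* shifts are assumptions read off the byte layout described above, while the real values come from the arch headers via the _WR_*/_RD_* macros.

#include <stdint.h>
#include <stdio.h>

#define DEMO_WR_NEXT_SHIFT      8       /* assumed: byte 1 = "next" writer */
#define DEMO_WR_CURR_SHIFT      16      /* assumed: byte 2 = "current" writer */
#define DEMO_RD_COUNT_SHIFT     24      /* assumed: byte 3 = reader count */
#define DEMO_BYTE_MASK          0xff

static void demo_decode_rwlock(uint32_t val)
{
        printf("tns in flight:  %u\n", val & 1);
        printf("next writer:    %u\n", (val >> DEMO_WR_NEXT_SHIFT) & DEMO_BYTE_MASK);
        printf("current writer: %u\n", (val >> DEMO_WR_CURR_SHIFT) & DEMO_BYTE_MASK);
        printf("active readers: %u\n", val >> DEMO_RD_COUNT_SHIFT);
}

int main(void)
{
        demo_decode_rwlock(0);                          /* unlocked */
        demo_decode_rwlock(2u << DEMO_RD_COUNT_SHIFT);  /* two readers hold it */
        demo_decode_rwlock(1u << DEMO_WR_NEXT_SHIFT);   /* one writer holds it */
        return 0;
}
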
/*
 * We can get the read lock if everything but the reader bits (which
 * are in the high part of the word) is zero, i.e. no active or
 * waiting writers, no tns.
 *
 * We guard the tns/store-back with an interrupt critical section to
 * preserve the semantic that the same read lock can be acquired in an
 * interrupt context.
 */
inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
        u32 val;
        __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
        val = __insn_tns((int *)&rwlock->lock);
        if (likely((val << _RD_COUNT_WIDTH) == 0)) {
                val += 1 << RD_COUNT_SHIFT;
                rwlock->lock = val;
                __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
                BUG_ON(val == 0);  /* we don't expect wraparound */
                return 1;
        }
        if ((val & 1) == 0)
                rwlock->lock = val;     /* restore the word the tns clobbered */
        __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
        return 0;
}
EXPORT_SYMBOL(arch_read_trylock);

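The fast-path test above, (val << _RD_COUNT_WIDTH) == 0, deserves a note: the reader count lives in the topmost _RD_COUNT_WIDTH bits, so shifting left by that width discards it, and a zero result proves the tns marker and both writer fields are all clear. In the demo layout from the earlier sketch:

/* True iff only reader-count bits (the top byte here) are set. */
static int demo_only_readers(uint32_t val)
{
        return (uint32_t)(val << 8) == 0;       /* 8 == demo _RD_COUNT_WIDTH */
}
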
/*
 * Spin doing arch_read_trylock() until we acquire the lock.
 * ISSUE: This approach can permanently starve readers.  A reader who sees
 * a writer could instead take a ticket lock (just like a writer would),
 * and atomically enter read mode (with 1 reader) when it gets the ticket.
 * This way both readers and writers would always make forward progress
 * in a finite time.
 */
void arch_read_lock(arch_rwlock_t *rwlock)
{
        u32 iterations = 0;
        while (unlikely(!arch_read_trylock(rwlock)))
                delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_read_lock);

void arch_read_unlock(arch_rwlock_t *rwlock)
{
        u32 val, iterations = 0;

        mb();  /* guarantee anything modified under the lock is visible */
        for (;;) {
                __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
                val = __insn_tns((int *)&rwlock->lock);
                if (likely((val & 1) == 0)) {
                        rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
                        __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
                        break;
                }
                __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
                delay_backoff(iterations++);
        }
}
EXPORT_SYMBOL(arch_read_unlock);

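All the tns/interrupt-critical-section machinery above exists because test-and-set is the atomic primitive the 32-bit Tile hardware provides; semantically, releasing a read lock is just a reader-count decrement with release ordering. In the portable demo model it collapses to a single line:

#include <stdatomic.h>
#include <stdint.h>

static void demo_read_unlock(_Atomic uint32_t *lock_word)
{
        /* Drop one reader (top byte in the demo layout), releasing
         * all writes made under the lock to the next acquirer. */
        atomic_fetch_sub_explicit(lock_word, 1u << 24,
                                  memory_order_release);
}
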
/*
 * We don't need an interrupt critical section here (unlike for
 * arch_read_lock) since we should never use a bare write lock where
 * it could be interrupted by code that could try to re-acquire it.
 */
void arch_write_lock(arch_rwlock_t *rwlock)
{
        /*
         * The trailing underscore on this variable (and curr_ below)
         * reminds us that the high bits are garbage; we mask them out
         * when we compare them.
         */
        u32 my_ticket_;
        u32 iterations = 0;
        u32 val = __insn_tns((int *)&rwlock->lock);

        if (likely(val == 0)) {
                rwlock->lock = 1 << _WR_NEXT_SHIFT;
                return;
        }

        /*
         * Wait until there are no readers, then bump up the next
         * field and capture the ticket value.
         */
        for (;;) {
                if (!(val & 1)) {
                        if ((val >> RD_COUNT_SHIFT) == 0)
                                break;
                        rwlock->lock = val;
                }
                delay_backoff(iterations++);
                val = __insn_tns((int *)&rwlock->lock);
        }

        /* Take out the next ticket and extract my ticket value. */
        rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
        my_ticket_ = val >> WR_NEXT_SHIFT;

        /* Wait until the "current" field matches our ticket. */
        for (;;) {
                u32 curr_ = val >> WR_CURR_SHIFT;
                u32 delta = ((my_ticket_ - curr_) & WR_MASK);
                if (likely(delta == 0))
                        break;

                /* Delay based on how many lock-holders are still out there. */
                relax((256 / CYCLES_PER_RELAX_LOOP) * delta);

                /*
                 * Get a non-tns value to check; we don't need to tns
                 * it ourselves.  Since we're not tns'ing, we retry
                 * more rapidly to get a valid value.
                 */
                while ((val = rwlock->lock) & 1)
                        relax(4);
        }
}
EXPORT_SYMBOL(arch_write_lock);

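The delta computation in the wait loop, (my_ticket_ - curr_) & WR_MASK, gives the number of ticket-holders still ahead of us even when the byte-wide counters wrap, which is what makes scaling the relax() delay by delta meaningful. A quick self-contained check with 8-bit tickets:

#include <assert.h>
#include <stdint.h>

/* Distance from curr to mine, modulo the 8-bit ticket space. */
static uint32_t demo_ticket_delta(uint32_t mine, uint32_t curr)
{
        return (mine - curr) & 0xff;    /* 0xff == demo WR_MASK */
}

int main(void)
{
        assert(demo_ticket_delta(5, 3) == 2);   /* two holders ahead */
        assert(demo_ticket_delta(1, 255) == 2); /* still two across the wrap */
        assert(demo_ticket_delta(7, 7) == 0);   /* our turn */
        return 0;
}
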
int arch_write_trylock(arch_rwlock_t *rwlock)
{
        u32 val = __insn_tns((int *)&rwlock->lock);

        /*
         * If a tns is in progress, or there's a waiting or active locker,
         * or active readers, we can't take the lock, so give up.
         */
        if (unlikely(val != 0)) {
                if (!(val & 1))
                        rwlock->lock = val;
                return 0;
        }

        /* Set the "next" field to mark it locked. */
        rwlock->lock = 1 << _WR_NEXT_SHIFT;
        return 1;
}
EXPORT_SYMBOL(arch_write_trylock);

void arch_write_unlock(arch_rwlock_t *rwlock)
{
        u32 val, eq, mask;

        mb();  /* guarantee anything modified under the lock is visible */
        val = __insn_tns((int *)&rwlock->lock);
        if (likely(val == (1 << _WR_NEXT_SHIFT))) {
                /* No other writers waiting: clear the word entirely. */
                rwlock->lock = 0;
                return;
        }
        while (unlikely(val & 1)) {
                /* Limited backoff since we are the highest-priority task. */
                relax(4);
                val = __insn_tns((int *)&rwlock->lock);
        }
        /* Bump the "current" byte; if it has caught up with "next",
         * no writer is waiting and the word collapses to zero. */
        mask = 1 << WR_CURR_SHIFT;
        val = __insn_addb(val, mask);
        eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
        val = __insn_mz(eq & mask, val);
        rwlock->lock = val;
}
EXPORT_SYMBOL(arch_write_unlock);
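
The closing addb/seqb/mz triple is a branch-free conditional built from Tile's byte-wise SIMD ops: addb bumps the "current" byte without carrying into its neighbors, seqb compares it against the "next" byte shifted into alignment, and mz zeroes the whole word when they match, i.e. when no further writer is queued. Spelled out branchily in plain C under the demo layout (and glossing over addb's no-carry property):

#include <stdint.h>

/* What the addb/seqb/mz sequence computes.  Note: the real addb adds
 * per byte with no carry between bytes; the plain '+' here would
 * misbehave if the "current" byte overflowed past 0xff. */
static uint32_t demo_write_unlock_word(uint32_t val)
{
        uint32_t curr, next;

        val += 1u << 16;                        /* addb: bump "current" (byte 2) */
        curr = (val >> 16) & 0xff;
        next = (val >> 8) & 0xff;
        return (curr == next) ? 0 : val;        /* seqb+mz: collapse if idle */
}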