Linux Kernel 3.7.1
hwspinlock_core.c
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>

#include "hwspinlock_internal.h"

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which keeps the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	if (mode == HWLOCK_IRQSTATE)
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		ret = spin_trylock_irq(&hwlock->lock);
	else
		ret = spin_trylock(&hwlock->lock);

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		if (mode == HWLOCK_IRQSTATE)
			spin_unlock_irqrestore(&hwlock->lock, *flags);
		else if (mode == HWLOCK_IRQ)
			spin_unlock_irq(&hwlock->lock);
		else
			spin_unlock(&hwlock->lock);

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);

int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (time_is_before_eq_jiffies(expire))
			return -ETIMEDOUT;

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);

void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	if (mode == HWLOCK_IRQSTATE)
		spin_unlock_irqrestore(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		spin_unlock_irq(&hwlock->lock);
	else
		spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

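/*
 * Example: a minimal caller sketch (for illustration; not taken from this
 * file), assuming the static inline wrappers declared in
 * <linux/hwspinlock.h> such as hwspin_lock_timeout_irqsave() and
 * hwspin_unlock_irqrestore(). The 100ms timeout and the shared-memory
 * access are hypothetical.
 */
static int example_touch_shared_data(struct hwspinlock *hwlock)
{
	unsigned long flags;
	int ret;

	/* spin (preemption and interrupts disabled) for at most 100ms */
	ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
	if (ret)
		return ret;

	/* ... access memory shared with a remote processor ... */

	hwspin_unlock_irqrestore(hwlock, &flags);
	return 0;
}
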
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return 0;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);

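/*
 * Example: a minimal registration sketch, showing the shape of a platform
 * driver probe that would hand a bank of 32 locks to this framework. The
 * my_* names, the lock count and the register-poking details are
 * hypothetical; a real driver (see omap_hwspinlock.c) also needs
 * "hwspinlock_internal.h" for struct hwspinlock_device and the ops layout.
 */
static int my_hwspinlock_trylock(struct hwspinlock *lock)
{
	/* must return 1 when the hardware lock was taken, 0 otherwise */
	return 1;
}

static void my_hwspinlock_unlock(struct hwspinlock *lock)
{
	/* release the hardware lock, e.g. by writing its register */
}

static const struct hwspinlock_ops my_hwspinlock_ops = {
	.trylock	= my_hwspinlock_trylock,
	.unlock		= my_hwspinlock_unlock,
};

static int my_hwspinlock_probe(struct platform_device *pdev)
{
	const int num_locks = 32;
	struct hwspinlock_device *bank;
	int ret;

	/* the bank embeds one struct hwspinlock per hardware lock */
	bank = kzalloc(sizeof(*bank) + num_locks * sizeof(struct hwspinlock),
			GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	platform_set_drvdata(pdev, bank);
	pm_runtime_enable(&pdev->dev);

	ret = hwspin_lock_register(bank, &pdev->dev, &my_hwspinlock_ops,
					0, num_locks);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		kfree(bank);
	}
	return ret;
}
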
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

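/*
 * Example: the matching removal sketch for the probe sketch above;
 * unregistering fails with -EBUSY while any lock of the bank is still
 * requested. The my_* names are hypothetical.
 */
static int my_hwspinlock_remove(struct platform_device *pdev)
{
	struct hwspinlock_device *bank = platform_get_drvdata(pdev);
	int ret;

	ret = hwspin_lock_unregister(bank);
	if (ret)
		return ret;

	pm_runtime_disable(&pdev->dev);
	kfree(bank);
	return 0;
}
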
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);

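/*
 * Example: a minimal allocation sketch. A dynamically requested lock has no
 * pre-agreed id, so the id must be shared with the remote peer before the
 * lock is useful; my_send_lock_id_to_remote() is a hypothetical placeholder
 * for whatever IPC mechanism the system provides.
 */
static struct hwspinlock *example_setup_shared_lock(void)
{
	struct hwspinlock *hwlock;

	hwlock = hwspin_lock_request();
	if (!hwlock)
		return NULL;

	/* both sides must agree on the lock id */
	my_send_lock_id_to_remote(hwspin_lock_get_id(hwlock));

	return hwlock;
}
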
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <[email protected]>");