Linux Kernel 3.7.1
cpumask.h
1 #ifndef __LINUX_CPUMASK_H
2 #define __LINUX_CPUMASK_H
3 
4 /*
5  * Cpumasks provide a bitmap suitable for representing the
6  * set of CPUs in a system, one bit position per CPU number. In general,
7  * only nr_cpu_ids (<= NR_CPUS) bits are valid.
8  */
9 #include <linux/kernel.h>
10 #include <linux/threads.h>
11 #include <linux/bitmap.h>
12 #include <linux/bug.h>
13 
14 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
15 
23 #define cpumask_bits(maskp) ((maskp)->bits)
24 
25 #if NR_CPUS == 1
26 #define nr_cpu_ids 1
27 #else
28 extern int nr_cpu_ids;
29 #endif
30 
31 #ifdef CONFIG_CPUMASK_OFFSTACK
32 /* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
33  * not all bits may be allocated. */
34 #define nr_cpumask_bits nr_cpu_ids
35 #else
36 #define nr_cpumask_bits NR_CPUS
37 #endif
38 
39 /*
40  * The following particular system cpumasks and operations manage
41  * possible, present, active and online cpus.
42  *
43  * cpu_possible_mask - has bit 'cpu' set iff cpu is populatable
44  * cpu_present_mask - has bit 'cpu' set iff cpu is populated
45  * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
46  * cpu_active_mask - has bit 'cpu' set iff cpu available to migration
47  *
48  * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
49  *
50  * The cpu_possible_mask is fixed at boot time, as the set of CPU ids
51  * that might ever be plugged in at any time during the life of that
52  * system boot. The cpu_present_mask is dynamic(*),
53  * representing which CPUs are currently plugged in. And
54  * cpu_online_mask is the dynamic subset of cpu_present_mask,
55  * indicating those CPUs available for scheduling.
56  *
57  * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
58  * all NR_CPUS bits set, otherwise it is just the set of CPUs that
59  * ACPI reports present at boot.
60  *
61  * If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
62  * depending on what ACPI reports as currently plugged in, otherwise
63  * cpu_present_mask is just a copy of cpu_possible_mask.
64  *
65  * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
66  * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
67  *
68  * Subtleties:
69  * 1) UP archs (NR_CPUS == 1, CONFIG_SMP not defined) hardcode the
70  * assumption that their single CPU is online. The UP
71  * cpu_{online,possible,present}_masks are placebos. Changing them
72  * will have no useful effect on the following num_*_cpus()
73  * and cpu_*() macros in the UP case. This ugliness is a UP
74  * optimization - don't waste any instructions or memory references
75  * asking if you're online or how many CPUs there are if there is
76  * only one CPU.
77  */
78 
79 extern const struct cpumask *const cpu_possible_mask;
80 extern const struct cpumask *const cpu_online_mask;
81 extern const struct cpumask *const cpu_present_mask;
82 extern const struct cpumask *const cpu_active_mask;
83 
84 #if NR_CPUS > 1
85 #define num_online_cpus() cpumask_weight(cpu_online_mask)
86 #define num_possible_cpus() cpumask_weight(cpu_possible_mask)
87 #define num_present_cpus() cpumask_weight(cpu_present_mask)
88 #define num_active_cpus() cpumask_weight(cpu_active_mask)
89 #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
90 #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
91 #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
92 #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
93 #else
94 #define num_online_cpus() 1U
95 #define num_possible_cpus() 1U
96 #define num_present_cpus() 1U
97 #define num_active_cpus() 1U
98 #define cpu_online(cpu) ((cpu) == 0)
99 #define cpu_possible(cpu) ((cpu) == 0)
100 #define cpu_present(cpu) ((cpu) == 0)
101 #define cpu_active(cpu) ((cpu) == 0)
102 #endif
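
For illustration, a minimal editorial sketch (not part of the original header) of how these masks and query macros are typically consumed; it assumes a kernel context where pr_info() from <linux/printk.h> is available, and report_cpus() is a hypothetical name:

        /* Hypothetical helper: report how many CPUs are usable. */
        static void report_cpus(void)
        {
                pr_info("%u online of %u possible CPUs\n",
                        num_online_cpus(), num_possible_cpus());

                if (cpu_online(0))
                        pr_info("CPU 0 is available to the scheduler\n");
        }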
103 
104 /* verify cpu argument to cpumask_* operators */
105 static inline unsigned int cpumask_check(unsigned int cpu)
106 {
107 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
108  WARN_ON_ONCE(cpu >= nr_cpumask_bits);
109 #endif /* CONFIG_DEBUG_PER_CPU_MAPS */
110  return cpu;
111 }
112 
113 #if NR_CPUS == 1
114 /* Uniprocessor. Assume all masks are "1". */
115 static inline unsigned int cpumask_first(const struct cpumask *srcp)
116 {
117  return 0;
118 }
119 
120 /* Valid inputs for n are -1 and 0. */
121 static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
122 {
123  return n+1;
124 }
125 
126 static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
127 {
128  return n+1;
129 }
130 
131 static inline unsigned int cpumask_next_and(int n,
132  const struct cpumask *srcp,
133  const struct cpumask *andp)
134 {
135  return n+1;
136 }
137 
138 /* cpu must be a valid cpu, i.e. 0, so there's no other choice. */
139 static inline unsigned int cpumask_any_but(const struct cpumask *mask,
140  unsigned int cpu)
141 {
142  return 1;
143 }
144 
145 #define for_each_cpu(cpu, mask) \
146  for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
147 #define for_each_cpu_not(cpu, mask) \
148  for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
149 #define for_each_cpu_and(cpu, mask, and) \
150  for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
151 #else
152 
158 static inline unsigned int cpumask_first(const struct cpumask *srcp)
159 {
160  return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
161 }
162 
170 static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
171 {
172  /* -1 is a legal arg here. */
173  if (n != -1)
174  cpumask_check(n);
175  return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
176 }
177 
185 static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
186 {
187  /* -1 is a legal arg here. */
188  if (n != -1)
189  cpumask_check(n);
190  return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
191 }
192 
193 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
194 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
195 
203 #define for_each_cpu(cpu, mask) \
204  for ((cpu) = -1; \
205  (cpu) = cpumask_next((cpu), (mask)), \
206  (cpu) < nr_cpu_ids;)
207 
215 #define for_each_cpu_not(cpu, mask) \
216  for ((cpu) = -1; \
217  (cpu) = cpumask_next_zero((cpu), (mask)), \
218  (cpu) < nr_cpu_ids;)
219 
234 #define for_each_cpu_and(cpu, mask, and) \
235  for ((cpu) = -1; \
236  (cpu) = cpumask_next_and((cpu), (mask), (and)), \
237  (cpu) < nr_cpu_ids;)
238 #endif /* SMP */
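
As an editorial sketch of the iterators (walk_masks() is a hypothetical name; pr_info() assumed available), showing both the for_each_cpu_and() form and the equivalent manual walk with cpumask_first()/cpumask_next():

        static void walk_masks(const struct cpumask *affinity)
        {
                int cpu;

                /* Visit every CPU that is both in @affinity and online. */
                for_each_cpu_and(cpu, affinity, cpu_online_mask)
                        pr_info("cpu %d: permitted and online\n", cpu);

                /* The same walk, spelled out with the primitives. */
                for (cpu = cpumask_first(affinity);
                     cpu < nr_cpu_ids;
                     cpu = cpumask_next(cpu, affinity))
                        pr_info("cpu %d: in affinity mask\n", cpu);
        }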
239 
240 #define CPU_BITS_NONE \
241 { \
242  [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
243 }
244 
245 #define CPU_BITS_CPU0 \
246 { \
247  [0] = 1UL \
248 }
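
These initializers are meant for statically defined masks. A hedged sketch (the mask names are hypothetical), following the same pattern the kernel uses for its own global masks:

        /* A mask holding only CPU 0, and an initially empty mask. */
        static struct cpumask cpu0_only = { .bits = CPU_BITS_CPU0 };
        static struct cpumask scratch   = { .bits = CPU_BITS_NONE };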
249 
255 static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
256 {
257  set_bit(cpumask_check(cpu), cpumask_bits(dstp));
258 }
259 
265 static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
266 {
267  clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
268 }
269 
279 #define cpumask_test_cpu(cpu, cpumask) \
280  test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
281 
291 static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
292 {
293  return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
294 }
295 
305 static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
306 {
307  return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
308 }
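
The test-and-set/clear forms are atomic, so they can arbitrate between concurrent callers. A minimal editorial sketch (claim_cpu() is a hypothetical helper):

        /* Returns true only for the first caller to claim @cpu. */
        static bool claim_cpu(int cpu, struct cpumask *claimed)
        {
                return !cpumask_test_and_set_cpu(cpu, claimed);
        }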
309 
314 static inline void cpumask_setall(struct cpumask *dstp)
315 {
316  bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
317 }
318 
323 static inline void cpumask_clear(struct cpumask *dstp)
324 {
325  bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
326 }
327 
336 static inline int cpumask_and(struct cpumask *dstp,
337  const struct cpumask *src1p,
338  const struct cpumask *src2p)
339 {
340  return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
341  cpumask_bits(src2p), nr_cpumask_bits);
342 }
343 
350 static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
351  const struct cpumask *src2p)
352 {
353  bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
354  cpumask_bits(src2p), nr_cpumask_bits);
355 }
356 
363 static inline void cpumask_xor(struct cpumask *dstp,
364  const struct cpumask *src1p,
365  const struct cpumask *src2p)
366 {
367  bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
368  cpumask_bits(src2p), nr_cpumask_bits);
369 }
370 
379 static inline int cpumask_andnot(struct cpumask *dstp,
380  const struct cpumask *src1p,
381  const struct cpumask *src2p)
382 {
383  return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
384  cpumask_bits(src2p), nr_cpumask_bits);
385 }
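
A short editorial sketch of the set operations, combining the system masks declared earlier (show_online_not_active() is a hypothetical name; pr_info() assumed available):

        static void show_online_not_active(struct cpumask *tmp)
        {
                /* cpumask_andnot() returns nonzero iff the result is non-empty. */
                if (cpumask_andnot(tmp, cpu_online_mask, cpu_active_mask))
                        pr_info("some online CPUs are not accepting migration\n");
        }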
386 
392 static inline void cpumask_complement(struct cpumask *dstp,
393  const struct cpumask *srcp)
394 {
395  bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
396  nr_cpumask_bits);
397 }
398 
404 static inline bool cpumask_equal(const struct cpumask *src1p,
405  const struct cpumask *src2p)
406 {
407  return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
408  nr_cpumask_bits);
409 }
410 
416 static inline bool cpumask_intersects(const struct cpumask *src1p,
417  const struct cpumask *src2p)
418 {
419  return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
420  nr_cpumask_bits);
421 }
422 
430 static inline int cpumask_subset(const struct cpumask *src1p,
431  const struct cpumask *src2p)
432 {
433  return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
434  nr_cpumask_bits);
435 }
436 
441 static inline bool cpumask_empty(const struct cpumask *srcp)
442 {
443  return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
444 }
445 
450 static inline bool cpumask_full(const struct cpumask *srcp)
451 {
452  return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
453 }
454 
459 static inline unsigned int cpumask_weight(const struct cpumask *srcp)
460 {
461  return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
462 }
463 
470 static inline void cpumask_shift_right(struct cpumask *dstp,
471  const struct cpumask *srcp, int n)
472 {
473  bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
474  nr_cpumask_bits);
475 }
476 
483 static inline void cpumask_shift_left(struct cpumask *dstp,
484  const struct cpumask *srcp, int n)
485 {
486  bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
487  nr_cpumask_bits);
488 }
489 
495 static inline void cpumask_copy(struct cpumask *dstp,
496  const struct cpumask *srcp)
497 {
498  bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
499 }
500 
507 #define cpumask_any(srcp) cpumask_first(srcp)
508 
516 #define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
517 
525 #define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
526 
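A hedged sketch of picking a destination CPU from the intersection of a caller-supplied mask and cpu_online_mask (pick_target_cpu() is a hypothetical name):

        static int pick_target_cpu(const struct cpumask *permitted)
        {
                unsigned int cpu = cpumask_any_and(permitted, cpu_online_mask);

                /* >= nr_cpu_ids means the two masks do not intersect. */
                return cpu < nr_cpu_ids ? (int)cpu : -EINVAL;
        }
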
531 #define cpumask_of(cpu) (get_cpu_mask(cpu))
532 
542 static inline int cpumask_scnprintf(char *buf, int len,
543  const struct cpumask *srcp)
544 {
545  return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
546 }
547 
556 static inline int cpumask_parse_user(const char __user *buf, int len,
557  struct cpumask *dstp)
558 {
559  return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
560 }
561 
570 static inline int cpumask_parselist_user(const char __user *buf, int len,
571  struct cpumask *dstp)
572 {
573  return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
574  nr_cpumask_bits);
575 }
576 
586 static inline int cpulist_scnprintf(char *buf, int len,
587  const struct cpumask *srcp)
588 {
589  return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
590  nr_cpumask_bits);
591 }
592 
600 static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
601 {
602  return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
603 }
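
For illustration, a sketch of consuming a cpulist string such as "0-3,8", for example from a sysfs store handler (parse_user_cpulist() is a hypothetical name):

        static int parse_user_cpulist(const char *buf, struct cpumask *dstp)
        {
                int err = cpulist_parse(buf, dstp);

                if (err)
                        return err;     /* malformed list */
                return cpumask_empty(dstp) ? -EINVAL : 0;
        }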
604 
610 static inline size_t cpumask_size(void)
611 {
612  /* FIXME: Once all cpumask assignments are eliminated, this
613  * can be nr_cpumask_bits */
614  return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
615 }
616 
617 /*
618  * cpumask_var_t: struct cpumask for stack usage.
619  *
620  * Oh, the wicked games we play! In order to make kernel coding a
621  * little more difficult, we typedef cpumask_var_t to an array or a
622  * pointer: doing &mask on an array is a noop, so it still works.
623  *
624  * i.e.:
625  * cpumask_var_t tmpmask;
626  * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
627  * return -ENOMEM;
628  *
629  * ... use 'tmpmask' like a normal struct cpumask * ...
630  *
631  * free_cpumask_var(tmpmask);
632  *
633  *
634  * However, there is one notable exception. alloc_cpumask_var() allocates
635  * only nr_cpumask_bits bits (on the other hand, a real cpumask_t always
636  * has NR_CPUS bits). Therefore you must not dereference a cpumask_var_t:
637  *
638  * cpumask_var_t tmpmask;
639  * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
640  * return -ENOMEM;
641  *
642  * var = *tmpmask;
643  *
644  * This code performs an NR_CPUS-length memcpy and leads to memory
645  * corruption. cpumask_copy() provides safe copy functionality instead.
646  */
647 #ifdef CONFIG_CPUMASK_OFFSTACK
648 typedef struct cpumask *cpumask_var_t;
649 
650 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
651 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
652 bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
653 bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
654 void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
655 void free_cpumask_var(cpumask_var_t mask);
656 void free_bootmem_cpumask_var(cpumask_var_t mask);
657 
658 #else
659 typedef struct cpumask cpumask_var_t[1];
660 
661 static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
662 {
663  return true;
664 }
665 
666 static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
667  int node)
668 {
669  return true;
670 }
671 
672 static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
673 {
674  cpumask_clear(*mask);
675  return true;
676 }
677 
678 static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
679  int node)
680 {
681  cpumask_clear(*mask);
682  return true;
683 }
684 
685 static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
686 {
687 }
688 
689 static inline void free_cpumask_var(cpumask_var_t mask)
690 {
691 }
692 
693 static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
694 {
695 }
696 #endif /* CONFIG_CPUMASK_OFFSTACK */
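
Putting the comment above into practice, a minimal editorial sketch of the allocate/copy/free pattern; it assumes GFP_KERNEL from <linux/gfp.h> is available, and snapshot_online() is a hypothetical name:

        static int snapshot_online(void)
        {
                cpumask_var_t snapshot;

                if (!zalloc_cpumask_var(&snapshot, GFP_KERNEL))
                        return -ENOMEM;

                /* Copy via the helper, never by assignment or memcpy(). */
                cpumask_copy(snapshot, cpu_online_mask);

                /* ... use 'snapshot' like a normal struct cpumask * ... */

                free_cpumask_var(snapshot);
                return 0;
        }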
697 
698 /* It's common to want to use cpu_all_mask in struct member initializers,
699  * so it has to refer to an address rather than a pointer. */
700 extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
701 #define cpu_all_mask to_cpumask(cpu_all_bits)
702 
703 /* First bits of cpu_bit_bitmap are in fact unset. */
704 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
705 
706 #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
707 #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
708 #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
709 
710 /* Wrappers for arch boot code to manipulate normally-constant masks */
711 void set_cpu_possible(unsigned int cpu, bool possible);
712 void set_cpu_present(unsigned int cpu, bool present);
713 void set_cpu_online(unsigned int cpu, bool online);
714 void set_cpu_active(unsigned int cpu, bool active);
715 void init_cpu_present(const struct cpumask *src);
716 void init_cpu_possible(const struct cpumask *src);
717 void init_cpu_online(const struct cpumask *src);
718 
729 #define to_cpumask(bitmap) \
730  ((struct cpumask *)(1 ? (bitmap) \
731  : (void *)sizeof(__check_is_bitmap(bitmap))))
732 
733 static inline int __check_is_bitmap(const unsigned long *bitmap)
734 {
735  return 1;
736 }
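
to_cpumask() is how a raw DECLARE_BITMAP() is exposed as a struct cpumask, exactly as cpu_all_mask is built above. A hedged sketch (frozen_bits and frozen_mask are hypothetical names):

        static DECLARE_BITMAP(frozen_bits, NR_CPUS);
        #define frozen_mask to_cpumask(frozen_bits)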
737 
738 /*
739  * Special-case data structure for "single bit set only" constant CPU masks.
740  *
741  * We pre-generate all the 64 (or 32) possible bit positions, with enough
742  * padding to the left and the right, and return the constant pointer
743  * appropriately offset.
744  */
745 extern const unsigned long
746  cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
747 
748 static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
749 {
750  const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
751  p -= cpu / BITS_PER_LONG;
752  return to_cpumask(p);
753 }
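
To make the offset trick concrete, an editorial worked example: with BITS_PER_LONG == 64 and cpu == 70, row 1 + (70 % 64) = 7 of cpu_bit_bitmap has word 0 equal to 1UL << 6; stepping the pointer back by 70 / 64 = 1 long makes that word appear at index 1 of the returned mask, so bit 70 is set, while word 0 is borrowed (all zero) from the preceding row. A small usage sketch (only_cpu() is a hypothetical helper):

        /* True iff @mask contains exactly @cpu and no other CPU. */
        static bool only_cpu(unsigned int cpu, const struct cpumask *mask)
        {
                return cpumask_equal(mask, cpumask_of(cpu));
        }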
754 
755 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
756 
757 #if NR_CPUS <= BITS_PER_LONG
758 #define CPU_BITS_ALL \
759 { \
760  [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
761 }
762 
763 #else /* NR_CPUS > BITS_PER_LONG */
764 
765 #define CPU_BITS_ALL \
766 { \
767  [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
768  [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
769 }
770 #endif /* NR_CPUS > BITS_PER_LONG */
771 
772 /*
773  *
774  * From here down, all obsolete. Use cpumask_ variants!
775  *
776  */
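As a hedged editorial sketch of migrating off this obsolete API (old_and() is a hypothetical name; note the obsolete macros take cpumask_t values and always operate on NR_CPUS bits, while the replacements take pointers and use nr_cpumask_bits):

        static void old_and(cpumask_t *dstp, const cpumask_t *srcp)
        {
                cpus_and(*dstp, *dstp, *srcp);    /* obsolete: NR_CPUS wide */
                cpumask_and(dstp, dstp, srcp);    /* preferred replacement */
        }
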
777 #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
778 #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
779 
780 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
781 
782 #if NR_CPUS <= BITS_PER_LONG
783 
784 #define CPU_MASK_ALL \
785 (cpumask_t) { { \
786  [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
787 } }
788 
789 #else
790 
791 #define CPU_MASK_ALL \
792 (cpumask_t) { { \
793  [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
794  [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
795 } }
796 
797 #endif
798 
799 #define CPU_MASK_NONE \
800 (cpumask_t) { { \
801  [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
802 } }
803 
804 #define CPU_MASK_CPU0 \
805 (cpumask_t) { { \
806  [0] = 1UL \
807 } }
808 
809 #if NR_CPUS == 1
810 #define first_cpu(src) ({ (void)(src); 0; })
811 #define next_cpu(n, src) ({ (void)(src); 1; })
812 #define any_online_cpu(mask) 0
813 #define for_each_cpu_mask(cpu, mask) \
814  for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
815 #else /* NR_CPUS > 1 */
816 int __first_cpu(const cpumask_t *srcp);
817 int __next_cpu(int n, const cpumask_t *srcp);
818 
819 #define first_cpu(src) __first_cpu(&(src))
820 #define next_cpu(n, src) __next_cpu((n), &(src))
821 #define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
822 #define for_each_cpu_mask(cpu, mask) \
823  for ((cpu) = -1; \
824  (cpu) = next_cpu((cpu), (mask)), \
825  (cpu) < NR_CPUS; )
826 #endif /* SMP */
827 
828 #if NR_CPUS <= 64
829 
830 #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
831 
832 #else /* NR_CPUS > 64 */
833 
834 int __next_cpu_nr(int n, const cpumask_t *srcp);
835 #define for_each_cpu_mask_nr(cpu, mask) \
836  for ((cpu) = -1; \
837  (cpu) = __next_cpu_nr((cpu), &(mask)), \
838  (cpu) < nr_cpu_ids; )
839 
840 #endif /* NR_CPUS > 64 */
841 
842 #define cpus_addr(src) ((src).bits)
843 
844 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
845 static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
846 {
847  set_bit(cpu, dstp->bits);
848 }
849 
850 #define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
851 static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
852 {
853  clear_bit(cpu, dstp->bits);
854 }
855 
856 #define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
857 static inline void __cpus_setall(cpumask_t *dstp, int nbits)
858 {
859  bitmap_fill(dstp->bits, nbits);
860 }
861 
862 #define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
863 static inline void __cpus_clear(cpumask_t *dstp, int nbits)
864 {
865  bitmap_zero(dstp->bits, nbits);
866 }
867 
868 /* No static inline type checking - see Subtlety (1) above. */
869 #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
870 
871 #define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
872 static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
873 {
874  return test_and_set_bit(cpu, addr->bits);
875 }
876 
877 #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
878 static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
879  const cpumask_t *src2p, int nbits)
880 {
881  return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
882 }
883 
884 #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
885 static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
886  const cpumask_t *src2p, int nbits)
887 {
888  bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
889 }
890 
891 #define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
892 static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
893  const cpumask_t *src2p, int nbits)
894 {
895  bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
896 }
897 
898 #define cpus_andnot(dst, src1, src2) \
899  __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
900 static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
901  const cpumask_t *src2p, int nbits)
902 {
903  return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
904 }
905 
906 #define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
907 static inline int __cpus_equal(const cpumask_t *src1p,
908  const cpumask_t *src2p, int nbits)
909 {
910  return bitmap_equal(src1p->bits, src2p->bits, nbits);
911 }
912 
913 #define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
914 static inline int __cpus_intersects(const cpumask_t *src1p,
915  const cpumask_t *src2p, int nbits)
916 {
917  return bitmap_intersects(src1p->bits, src2p->bits, nbits);
918 }
919 
920 #define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
921 static inline int __cpus_subset(const cpumask_t *src1p,
922  const cpumask_t *src2p, int nbits)
923 {
924  return bitmap_subset(src1p->bits, src2p->bits, nbits);
925 }
926 
927 #define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
928 static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
929 {
930  return bitmap_empty(srcp->bits, nbits);
931 }
932 
933 #define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
934 static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
935 {
936  return bitmap_weight(srcp->bits, nbits);
937 }
938 
939 #define cpus_shift_left(dst, src, n) \
940  __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
941 static inline void __cpus_shift_left(cpumask_t *dstp,
942  const cpumask_t *srcp, int n, int nbits)
943 {
944  bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
945 }
946 #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
947 
948 #endif /* __LINUX_CPUMASK_H */