/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: [email protected]
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

#ifndef __CVMX_POW_H__
#define __CVMX_POW_H__

#include <asm/octeon/cvmx-pow-defs.h>

#include <asm/octeon/cvmx-scratch.h>
#include <asm/octeon/cvmx-wqe.h>

/* Default to having all POW consistency checks turned on */
#ifndef CVMX_ENABLE_POW_CHECKS
#define CVMX_ENABLE_POW_CHECKS 1
#endif

enum cvmx_pow_tag_type {
        /* Tag ordering is maintained */
        CVMX_POW_TAG_TYPE_ORDERED   = 0L,
        /* Tag ordering is maintained, and at most one PP has the tag */
        CVMX_POW_TAG_TYPE_ATOMIC    = 1L,
        /*
         * The work queue entry is removed from the order (no
         * ordering is maintained) - NEVER tag switch from NULL to
         * NULL
         */
        CVMX_POW_TAG_TYPE_NULL      = 2L,
        /*
         * A tag switch to NULL, and there is no space reserved in POW
         * - NEVER tag switch to NULL_NULL
         * - NEVER tag switch from NULL_NULL
         * - NULL_NULL is entered at the beginning of time and on a deschedule.
         * - NULL_NULL can be exited by a new work request. A NULL_SWITCH
         *   load can also switch the state to NULL
         */
        CVMX_POW_TAG_TYPE_NULL_NULL = 3L
};

/**
 * Wait flag values for pow functions.
 */
typedef enum {
        CVMX_POW_WAIT = 1,
        CVMX_POW_NO_WAIT = 0,
} cvmx_pow_wait_t;

/**
 * POW tag operations. These are used in the data stored to the POW.
 */
typedef enum {
        /*
         * switch the tag (only) for this PP
         * - the previous tag should be non-NULL in this case
         * - tag switch response required
         * - fields used: op, type, tag
         */
        CVMX_POW_TAG_OP_SWTAG = 0L,
        /*
         * switch the tag for this PP, with full information
         * - this should be used when the previous tag is NULL
         * - tag switch response required
         * - fields used: address, op, grp, type, tag
         */
        CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
        /*
         * switch the tag (and/or group) for this PP and de-schedule
         * - OK to keep the tag the same and only change the group
         * - fields used: op, no_sched, grp, type, tag
         */
        CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
        /*
         * just de-schedule
         * - fields used: op, no_sched
         */
        CVMX_POW_TAG_OP_DESCH = 3L,
        /*
         * create an entirely new work queue entry
         * - fields used: address, op, qos, grp, type, tag
         */
        CVMX_POW_TAG_OP_ADDWQ = 4L,
        /*
         * just update the work queue pointer and grp for this PP
         * - fields used: address, op, grp
         */
        CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
        /*
         * set the no_sched bit on the de-schedule list
         *
         * - does nothing if the selected entry is not on the
         *   de-schedule list
         *
         * - does nothing if the stored work queue pointer does not
         *   match the address field
         *
         * - fields used: address, index, op
         *
         * Before issuing a *_NSCHED operation, SW must guarantee
         * that all prior deschedules and set/clr NSCHED operations
         * are complete and all prior switches are complete. The
         * hardware provides the opsdone bit and swdone bit for SW
         * polling. After issuing a *_NSCHED operation, SW must
         * guarantee that the set/clr NSCHED is complete before any
         * subsequent operations.
         */
        CVMX_POW_TAG_OP_SET_NSCHED = 6L,
        /*
         * clear the no_sched bit on the de-schedule list
         *
         * - does nothing if the selected entry is not on the
         *   de-schedule list
         *
         * - does nothing if the stored work queue pointer does not
         *   match the address field
         *
         * - fields used: address, index, op
         *
         * The same ordering rules apply as for
         * CVMX_POW_TAG_OP_SET_NSCHED above.
         */
        CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
        /* do nothing */
        CVMX_POW_TAG_OP_NOP = 15L
} cvmx_pow_tag_op_t;

/**
 * This structure defines the store data on a store to POW.
 */
typedef union {
        uint64_t u64;
        struct {
                /*
                 * Don't reschedule this entry. no_sched is used for
                 * CVMX_POW_TAG_OP_SWTAG_DESCH and
                 * CVMX_POW_TAG_OP_DESCH
                 */
                uint64_t no_sched:1;
                uint64_t unused:2;
                /* contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
                uint64_t index:13;
                /* The operation to perform */
                cvmx_pow_tag_op_t op:4;
                uint64_t unused2:2;
                /*
                 * The QOS level for the packet. qos is only used for
                 * CVMX_POW_TAG_OP_ADDWQ
                 */
                uint64_t qos:3;
                /*
                 * The group that the work queue entry will be
                 * scheduled to. grp is used for CVMX_POW_TAG_OP_ADDWQ,
                 * CVMX_POW_TAG_OP_SWTAG_FULL,
                 * CVMX_POW_TAG_OP_SWTAG_DESCH, and
                 * CVMX_POW_TAG_OP_UPDATE_WQP_GRP
                 */
                uint64_t grp:4;
                /*
                 * The type of the tag. type is used for everything
                 * except CVMX_POW_TAG_OP_DESCH,
                 * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
                 * CVMX_POW_TAG_OP_*_NSCHED
                 */
                uint64_t type:3;
                /*
                 * The actual tag. tag is used for everything except
                 * CVMX_POW_TAG_OP_DESCH,
                 * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
                 * CVMX_POW_TAG_OP_*_NSCHED
                 */
                uint64_t tag:32;
        } s;
} cvmx_pow_tag_req_t;

/**
 * This structure describes the address to load stuff from POW.
 */
typedef union {
        uint64_t u64;

        /* Address for new work request loads (did<2:0> == 0) */
        struct {
                /* Mips64 address region. Should be CVMX_IO_SEG */
                uint64_t mem_region:2;
                /* Must be zero */
                uint64_t reserved_49_61:13;
                /* Must be one */
                uint64_t is_io:1;
                /* the ID of POW -- did<2:0> == 0 in this case */
                uint64_t did:8;
                /* Must be zero */
                uint64_t reserved_4_39:36;
                /*
                 * If set, don't return load response until work is
                 * available.
                 */
                uint64_t wait:1;
                /* Must be zero */
                uint64_t reserved_0_2:3;
        } swork;

        /* Address for loads to get POW internal status */
        struct {
                /* Mips64 address region. Should be CVMX_IO_SEG */
                uint64_t mem_region:2;
                /* Must be zero */
                uint64_t reserved_49_61:13;
                /* Must be one */
                uint64_t is_io:1;
                /* the ID of POW -- did<2:0> == 1 in this case */
                uint64_t did:8;
                /* Must be zero */
                uint64_t reserved_10_39:30;
                /* The core id to get status for */
                uint64_t coreid:4;
                /*
                 * If set and get_cur is set, return reverse tag-list
                 * pointer rather than forward tag-list pointer.
                 */
                uint64_t get_rev:1;
                /*
                 * If set, return current status rather than pending
                 * status.
                 */
                uint64_t get_cur:1;
                /*
                 * If set, get the work-queue pointer rather than
                 * tag/type.
                 */
                uint64_t get_wqp:1;
                /* Must be zero */
                uint64_t reserved_0_2:3;
        } sstatus;

        /* Address for memory loads to get POW internal state */
        struct {
                /* Mips64 address region. Should be CVMX_IO_SEG */
                uint64_t mem_region:2;
                /* Must be zero */
                uint64_t reserved_49_61:13;
                /* Must be one */
                uint64_t is_io:1;
                /* the ID of POW -- did<2:0> == 2 in this case */
                uint64_t did:8;
                /* Must be zero */
                uint64_t reserved_16_39:24;
                /* POW memory index */
                uint64_t index:11;
                /*
                 * If set, return deschedule information rather than
                 * the standard response for work-queue index (invalid
                 * if the work-queue entry is not on the deschedule
                 * list).
                 */
                uint64_t get_des:1;
                /*
                 * If set, get the work-queue pointer rather than
                 * tag/type (no effect when get_des set).
                 */
                uint64_t get_wqp:1;
                /* Must be zero */
                uint64_t reserved_0_2:3;
        } smemload;

        /* Address for index/pointer loads */
        struct {
                /* Mips64 address region. Should be CVMX_IO_SEG */
                uint64_t mem_region:2;
                /* Must be zero */
                uint64_t reserved_49_61:13;
                /* Must be one */
                uint64_t is_io:1;
                /* the ID of POW -- did<2:0> == 3 in this case */
                uint64_t did:8;
                /* Must be zero */
                uint64_t reserved_9_39:31;
                /*
                 * when {get_rmt == 0 AND get_des_get_tail == 0}, this
                 * field selects one of eight POW internal-input
                 * queues (0-7), one per QOS level; values 8-15 are
                 * illegal in this case; when {get_rmt == 0 AND
                 * get_des_get_tail == 1}, this field selects one of
                 * 16 deschedule lists (per group); when get_rmt == 1,
                 * this field selects one of 16 memory-input queue
                 * lists. The two memory-input queue lists associated
                 * with each QOS level are:
                 *
                 * - qosgrp = 0, qosgrp = 8:  QOS0
                 * - qosgrp = 1, qosgrp = 9:  QOS1
                 * - qosgrp = 2, qosgrp = 10: QOS2
                 * - qosgrp = 3, qosgrp = 11: QOS3
                 * - qosgrp = 4, qosgrp = 12: QOS4
                 * - qosgrp = 5, qosgrp = 13: QOS5
                 * - qosgrp = 6, qosgrp = 14: QOS6
                 * - qosgrp = 7, qosgrp = 15: QOS7
                 */
                uint64_t qosgrp:4;
                /*
                 * If set and get_rmt is clear, return deschedule list
                 * indexes rather than indexes for the specified qos
                 * level; if set and get_rmt is set, return the tail
                 * pointer rather than the head pointer for the
                 * specified qos level.
                 */
                uint64_t get_des_get_tail:1;
                /*
                 * If set, return remote pointers rather than the
                 * local indexes for the specified qos level.
                 */
                uint64_t get_rmt:1;
                /* Must be zero */
                uint64_t reserved_0_2:3;
        } sindexload;

        /*
         * Address for NULL_RD request (did<2:0> == 4). When this is
         * read, HW attempts to change the state to NULL if it is
         * NULL_NULL (the hardware cannot switch from NULL_NULL to
         * NULL if a POW entry is not available - software may need to
         * recover by finishing another piece of work before a POW
         * entry can ever become available).
         */
        struct {
                /* Mips64 address region. Should be CVMX_IO_SEG */
                uint64_t mem_region:2;
                /* Must be zero */
                uint64_t reserved_49_61:13;
                /* Must be one */
                uint64_t is_io:1;
                /* the ID of POW -- did<2:0> == 4 in this case */
                uint64_t did:8;
                /* Must be zero */
                uint64_t reserved_0_39:40;
        } snull_rd;
} cvmx_pow_load_addr_t;

/**
 * This structure defines the response to a load/SENDSINGLE to POW
 * (except CSR reads).
 */
typedef union {
        uint64_t u64;

        /* Response to new work request loads */
        struct {
                /*
                 * Set when no new work queue entry was returned. If
                 * there was de-scheduled work, the HW will definitely
                 * return it. When this bit is set, it could mean
                 * either:
                 *
                 * - There was no work, or
                 *
                 * - There was no work that the HW could find. This
                 *   case can happen, regardless of the wait bit value
                 *   in the original request, when there is work in
                 *   the IQ's that is too deep down the list.
                 */
                uint64_t no_work:1;
                /* Must be zero */
                uint64_t reserved_40_62:23;
                /* 36 in O1 -- the work queue pointer */
                uint64_t addr:40;
        } s_work;

        /* Result for a POW Status Load (when get_cur == 0 and get_wqp == 0) */
        struct {
                uint64_t reserved_62_63:2;
                /*
                 * Set when there is a pending non-NULL SWTAG or
                 * SWTAG_FULL, and the POW entry has not left the list
                 * for the original tag.
                 */
                uint64_t pend_switch:1;
                /* Set when SWTAG_FULL and pend_switch is set. */
                uint64_t pend_switch_full:1;
                /*
                 * Set when there is a pending NULL SWTAG, or an
                 * implicit switch to NULL.
                 */
                uint64_t pend_switch_null:1;
                /* Set when there is a pending DESCHED or SWTAG_DESCHED. */
                uint64_t pend_desched:1;
                /*
                 * Set when there is a pending SWTAG_DESCHED and
                 * pend_desched is set.
                 */
                uint64_t pend_desched_switch:1;
                /* Set when nosched is desired and pend_desched is set. */
                uint64_t pend_nosched:1;
                /* Set when there is a pending GET_WORK. */
                uint64_t pend_new_work:1;
                /*
                 * When pend_new_work is set, this bit indicates that
                 * the wait bit was set.
                 */
                uint64_t pend_new_work_wait:1;
                /* Set when there is a pending NULL_RD. */
                uint64_t pend_null_rd:1;
                /* Set when there is a pending CLR_NSCHED. */
                uint64_t pend_nosched_clr:1;
                uint64_t reserved_51:1;
                /* This is the index when pend_nosched_clr is set. */
                uint64_t pend_index:11;
                /*
                 * This is the new_grp when (pend_desched AND
                 * pend_desched_switch) is set.
                 */
                uint64_t pend_grp:4;
                uint64_t reserved_34_35:2;
                /*
                 * This is the tag type when pend_switch or
                 * (pend_desched AND pend_desched_switch) are set.
                 */
                uint64_t pend_type:2;
                /*
                 * This is the tag when pend_switch or (pend_desched
                 * AND pend_desched_switch) are set.
                 */
                uint64_t pend_tag:32;
        } s_sstatus0;

        /* Result for a POW Status Load (when get_cur == 0 and get_wqp == 1) */
        struct {
                uint64_t reserved_62_63:2;
                /*
                 * Set when there is a pending non-NULL SWTAG or
                 * SWTAG_FULL, and the POW entry has not left the list
                 * for the original tag.
                 */
                uint64_t pend_switch:1;
                /* Set when SWTAG_FULL and pend_switch is set. */
                uint64_t pend_switch_full:1;
                /*
                 * Set when there is a pending NULL SWTAG, or an
                 * implicit switch to NULL.
                 */
                uint64_t pend_switch_null:1;
                /*
                 * Set when there is a pending DESCHED or
                 * SWTAG_DESCHED.
                 */
                uint64_t pend_desched:1;
                /*
                 * Set when there is a pending SWTAG_DESCHED and
                 * pend_desched is set.
                 */
                uint64_t pend_desched_switch:1;
                /* Set when nosched is desired and pend_desched is set. */
                uint64_t pend_nosched:1;
                /* Set when there is a pending GET_WORK. */
                uint64_t pend_new_work:1;
                /*
                 * When pend_new_work is set, this bit indicates that
                 * the wait bit was set.
                 */
                uint64_t pend_new_work_wait:1;
                /* Set when there is a pending NULL_RD. */
                uint64_t pend_null_rd:1;
                /* Set when there is a pending CLR_NSCHED. */
                uint64_t pend_nosched_clr:1;
                uint64_t reserved_51:1;
                /* This is the index when pend_nosched_clr is set. */
                uint64_t pend_index:11;
                /*
                 * This is the new_grp when (pend_desched AND
                 * pend_desched_switch) is set.
                 */
                uint64_t pend_grp:4;
                /* This is the wqp when pend_nosched_clr is set. */
                uint64_t pend_wqp:36;
        } s_sstatus1;

        /*
         * Result for a POW Status Load (when get_cur == 1,
         * get_wqp == 0, and get_rev == 0)
         */
        struct {
                uint64_t reserved_62_63:2;
                /*
                 * Points to the next POW entry in the tag list when
                 * tail == 0 (and tag_type is not NULL or NULL_NULL).
                 */
                uint64_t link_index:11;
                /* The POW entry attached to the core. */
                uint64_t index:11;
                /*
                 * The group attached to the core (updated when new
                 * tag list entered on SWTAG_FULL).
                 */
                uint64_t grp:4;
                /*
                 * Set when this POW entry is at the head of its tag
                 * list (also set when in the NULL or NULL_NULL
                 * state).
                 */
                uint64_t head:1;
                /*
                 * Set when this POW entry is at the tail of its tag
                 * list (also set when in the NULL or NULL_NULL
                 * state).
                 */
                uint64_t tail:1;
                /*
                 * The tag type attached to the core (updated when new
                 * tag list entered on SWTAG, SWTAG_FULL, or
                 * SWTAG_DESCHED).
                 */
                uint64_t tag_type:2;
                /*
                 * The tag attached to the core (updated when new tag
                 * list entered on SWTAG, SWTAG_FULL, or
                 * SWTAG_DESCHED).
                 */
                uint64_t tag:32;
        } s_sstatus2;

        /*
         * Result for a POW Status Load (when get_cur == 1,
         * get_wqp == 0, and get_rev == 1)
         */
        struct {
                uint64_t reserved_62_63:2;
                /*
                 * Points to the prior POW entry in the tag list when
                 * head == 0 (and tag_type is not NULL or
                 * NULL_NULL). This field is unpredictable when the
                 * core's state is NULL or NULL_NULL.
                 */
                uint64_t revlink_index:11;
                /* The POW entry attached to the core. */
                uint64_t index:11;
                /*
                 * The group attached to the core (updated when new
                 * tag list entered on SWTAG_FULL).
                 */
                uint64_t grp:4;
                /*
                 * Set when this POW entry is at the head of its tag
                 * list (also set when in the NULL or NULL_NULL
                 * state).
                 */
                uint64_t head:1;
                /*
                 * Set when this POW entry is at the tail of its tag
                 * list (also set when in the NULL or NULL_NULL
                 * state).
                 */
                uint64_t tail:1;
                /*
                 * The tag type attached to the core (updated when new
                 * tag list entered on SWTAG, SWTAG_FULL, or
                 * SWTAG_DESCHED).
                 */
                uint64_t tag_type:2;
                /*
                 * The tag attached to the core (updated when new tag
                 * list entered on SWTAG, SWTAG_FULL, or
                 * SWTAG_DESCHED).
                 */
                uint64_t tag:32;
        } s_sstatus3;

        /*
         * Result for a POW Status Load (when get_cur == 1,
         * get_wqp == 1, and get_rev == 0)
         */
        struct {
                uint64_t reserved_62_63:2;
                /*
                 * Points to the next POW entry in the tag list when
                 * tail == 0 (and tag_type is not NULL or NULL_NULL).
                 */
                uint64_t link_index:11;
                /* The POW entry attached to the core. */
                uint64_t index:11;
                /*
                 * The group attached to the core (updated when new
                 * tag list entered on SWTAG_FULL).
                 */
                uint64_t grp:4;
                /*
                 * The wqp attached to the core (updated when new tag
                 * list entered on SWTAG_FULL).
                 */
                uint64_t wqp:36;
        } s_sstatus4;

        /*
         * Result for a POW Status Load (when get_cur == 1,
         * get_wqp == 1, and get_rev == 1)
         */
        struct {
                uint64_t reserved_62_63:2;
                /*
                 * Points to the prior POW entry in the tag list when
                 * head == 0 (and tag_type is not NULL or
                 * NULL_NULL). This field is unpredictable when the
                 * core's state is NULL or NULL_NULL.
                 */
                uint64_t revlink_index:11;
                /* The POW entry attached to the core. */
                uint64_t index:11;
                /*
                 * The group attached to the core (updated when new
                 * tag list entered on SWTAG_FULL).
                 */
                uint64_t grp:4;
                /*
                 * The wqp attached to the core (updated when new tag
                 * list entered on SWTAG_FULL).
                 */
                uint64_t wqp:36;
        } s_sstatus5;

        /* Result for a POW Memory Load (get_des == 0 and get_wqp == 0) */
        struct {
                uint64_t reserved_51_63:13;
                /*
                 * The next entry in the input, free, descheduled_head
                 * list (unpredictable if entry is the tail of the
                 * list).
                 */
                uint64_t next_index:11;
                /* The group of the POW entry. */
                uint64_t grp:4;
                uint64_t reserved_35:1;
                /*
                 * Set when this POW entry is at the tail of its tag
                 * list (also set when in the NULL or NULL_NULL
                 * state).
                 */
                uint64_t tail:1;
                /* The tag type of the POW entry. */
                uint64_t tag_type:2;
                /* The tag of the POW entry. */
                uint64_t tag:32;
        } s_smemload0;

        /* Result for a POW Memory Load (get_des == 0 and get_wqp == 1) */
        struct {
                uint64_t reserved_51_63:13;
                /*
                 * The next entry in the input, free, descheduled_head
                 * list (unpredictable if entry is the tail of the
                 * list).
                 */
                uint64_t next_index:11;
                /* The group of the POW entry. */
                uint64_t grp:4;
                /* The WQP held in the POW entry. */
                uint64_t wqp:36;
        } s_smemload1;

        /* Result for a POW Memory Load (get_des == 1) */
        struct {
                uint64_t reserved_51_63:13;
                /*
                 * The next entry in the tag list connected to the
                 * descheduled head.
                 */
                uint64_t fwd_index:11;
                /* The group of the POW entry. */
                uint64_t grp:4;
                /* The nosched bit for the POW entry. */
                uint64_t nosched:1;
                /* There is a pending tag switch */
                uint64_t pend_switch:1;
                /*
                 * The next tag type for the new tag list when
                 * pend_switch is set.
                 */
                uint64_t pend_type:2;
                /*
                 * The next tag for the new tag list when pend_switch
                 * is set.
                 */
                uint64_t pend_tag:32;
        } s_smemload2;

        /*
         * Result for a POW Index/Pointer Load (get_rmt == 0 and
         * get_des_get_tail == 0)
         */
        struct {
                uint64_t reserved_52_63:12;
                /*
                 * set when there is one or more POW entries on the
                 * free list.
                 */
                uint64_t free_val:1;
                /*
                 * set when there is exactly one POW entry on the free
                 * list.
                 */
                uint64_t free_one:1;
                uint64_t reserved_49:1;
                /*
                 * when free_val is set, indicates the first entry on
                 * the free list.
                 */
                uint64_t free_head:11;
                uint64_t reserved_37:1;
                /*
                 * when free_val is set, indicates the last entry on
                 * the free list.
                 */
                uint64_t free_tail:11;
                /*
                 * set when there is one or more POW entries on the
                 * input Q list selected by qosgrp.
                 */
                uint64_t loc_val:1;
                /*
                 * set when there is exactly one POW entry on the
                 * input Q list selected by qosgrp.
                 */
                uint64_t loc_one:1;
                uint64_t reserved_23:1;
                /*
                 * when loc_val is set, indicates the first entry on
                 * the input Q list selected by qosgrp.
                 */
                uint64_t loc_head:11;
                uint64_t reserved_11:1;
                /*
                 * when loc_val is set, indicates the last entry on
                 * the input Q list selected by qosgrp.
                 */
                uint64_t loc_tail:11;
        } sindexload0;

        /*
         * Result for a POW Index/Pointer Load (get_rmt == 0 and
         * get_des_get_tail == 1)
         */
        struct {
                uint64_t reserved_52_63:12;
                /*
                 * set when there is one or more POW entries on the
                 * nosched list.
                 */
                uint64_t nosched_val:1;
                /*
                 * set when there is exactly one POW entry on the
                 * nosched list.
                 */
                uint64_t nosched_one:1;
                uint64_t reserved_49:1;
                /*
                 * when nosched_val is set, indicates the first entry
                 * on the nosched list.
                 */
                uint64_t nosched_head:11;
                uint64_t reserved_37:1;
                /*
                 * when nosched_val is set, indicates the last entry
                 * on the nosched list.
                 */
                uint64_t nosched_tail:11;
                /*
                 * set when there is one or more descheduled heads on
                 * the descheduled list selected by qosgrp.
                 */
                uint64_t des_val:1;
                /*
                 * set when there is exactly one descheduled head on
                 * the descheduled list selected by qosgrp.
                 */
                uint64_t des_one:1;
                uint64_t reserved_23:1;
                /*
                 * when des_val is set, indicates the first
                 * descheduled head on the descheduled list selected
                 * by qosgrp.
                 */
                uint64_t des_head:11;
                uint64_t reserved_11:1;
                /*
                 * when des_val is set, indicates the last descheduled
                 * head on the descheduled list selected by qosgrp.
                 */
                uint64_t des_tail:11;
        } sindexload1;

        /*
         * Result for a POW Index/Pointer Load (get_rmt == 1 and
         * get_des_get_tail == 0)
         */
        struct {
                uint64_t reserved_39_63:25;
                /*
                 * Set when this DRAM list is the current head
                 * (i.e. is the next to be reloaded when the POW
                 * hardware reloads a POW entry from DRAM). The POW
                 * hardware alternates between the two DRAM lists
                 * associated with a QOS level when it reloads work
                 * from DRAM into the POW unit.
                 */
                uint64_t rmt_is_head:1;
                /*
                 * Set when the DRAM portion of the input Q list
                 * selected by qosgrp contains one or more pieces of
                 * work.
                 */
                uint64_t rmt_val:1;
                /*
                 * Set when the DRAM portion of the input Q list
                 * selected by qosgrp contains exactly one piece of
                 * work.
                 */
                uint64_t rmt_one:1;
                /*
                 * When rmt_val is set, indicates the first piece of
                 * work on the DRAM input Q list selected by qosgrp.
                 */
                uint64_t rmt_head:36;
        } sindexload2;

        /*
         * Result for a POW Index/Pointer Load (get_rmt == 1 and
         * get_des_get_tail == 1)
         */
        struct {
                uint64_t reserved_39_63:25;
                /*
                 * Set when this DRAM list is the current head
                 * (i.e. is the next to be reloaded when the POW
                 * hardware reloads a POW entry from DRAM). The POW
                 * hardware alternates between the two DRAM lists
                 * associated with a QOS level when it reloads work
                 * from DRAM into the POW unit.
                 */
                uint64_t rmt_is_head:1;
                /*
                 * Set when the DRAM portion of the input Q list
                 * selected by qosgrp contains one or more pieces of
                 * work.
                 */
                uint64_t rmt_val:1;
                /*
                 * Set when the DRAM portion of the input Q list
                 * selected by qosgrp contains exactly one piece of
                 * work.
                 */
                uint64_t rmt_one:1;
                /*
                 * When rmt_val is set, indicates the last piece of
                 * work on the DRAM input Q list selected by qosgrp.
                 */
                uint64_t rmt_tail:36;
        } sindexload3;

        /* Response to NULL_RD request loads */
        struct {
                uint64_t unused:62;
                /*
                 * of type enum cvmx_pow_tag_type. state is one of the
                 * following:
                 *
                 * - CVMX_POW_TAG_TYPE_ORDERED
                 * - CVMX_POW_TAG_TYPE_ATOMIC
                 * - CVMX_POW_TAG_TYPE_NULL
                 * - CVMX_POW_TAG_TYPE_NULL_NULL
                 */
                uint64_t state:2;
        } s_null_rd;
} cvmx_pow_tag_load_resp_t;

/**
 * This structure describes the address used for stores to the POW.
 * The hardware assumes that an aligned 64-bit store was used for all
 * these stores. Note the assumption that the work queue entry is
 * aligned on an 8-byte boundary (since the low-order 3 address bits
 * must be zero). Note that not all fields are used by all operations.
 */
typedef union {
        uint64_t u64;
        struct {
                /* Memory region. Should be CVMX_IO_SEG in most cases */
                uint64_t mem_reg:2;
                uint64_t reserved_49_61:13;     /* Must be zero */
                uint64_t is_io:1;       /* Must be one */
                /* Device ID of POW. Note that different sub-dids are used. */
                uint64_t did:8;
                uint64_t reserved_36_39:4;      /* Must be zero */
                /* Address field. addr<2:0> must be zero */
                uint64_t addr:36;
        } stag;
} cvmx_pow_tag_store_addr_t;

/**
 * Decode of the store data when an IOBDMA SENDSINGLE is sent to POW.
 */
typedef union {
        uint64_t u64;
        struct {
                /*
                 * the (64-bit word) location in scratchpad to write
                 * to (if len != 0)
                 */
                uint64_t scraddr:8;
                /* the number of words in the response (0 => no response) */
                uint64_t len:8;
                /* the ID of the device on the non-coherent bus */
                uint64_t did:8;
                uint64_t unused:36;
                /* if set, don't return load response until work is available */
                uint64_t wait:1;
                uint64_t unused2:3;
        } s;
} cvmx_pow_iobdma_store_t;

/* CSR typedefs have been moved to cvmx-csr-*.h */

/**
 * Get the POW tag for this core. This returns the current tag type,
 * tag, group, and POW entry index associated with this core. Index
 * is only valid if the tag type isn't NULL_NULL. If a tag switch is
 * pending this routine returns the tag before the tag switch, not
 * after.
 *
 * Returns the current tag
 */
static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
{
        cvmx_pow_load_addr_t load_addr;
        cvmx_pow_tag_load_resp_t load_resp;
        cvmx_pow_tag_req_t result;

        load_addr.u64 = 0;
        load_addr.sstatus.mem_region = CVMX_IO_SEG;
        load_addr.sstatus.is_io = 1;
        load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
        load_addr.sstatus.coreid = cvmx_get_core_num();
        load_addr.sstatus.get_cur = 1;
        load_resp.u64 = cvmx_read_csr(load_addr.u64);
        result.u64 = 0;
        result.s.grp = load_resp.s_sstatus2.grp;
        result.s.index = load_resp.s_sstatus2.index;
        result.s.type = load_resp.s_sstatus2.tag_type;
        result.s.tag = load_resp.s_sstatus2.tag;
        return result;
}

/**
 * Get the POW WQE for this core. This returns the work queue entry
 * currently associated with this core.
 *
 * Returns the WQE pointer
 */
static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
{
        cvmx_pow_load_addr_t load_addr;
        cvmx_pow_tag_load_resp_t load_resp;

        load_addr.u64 = 0;
        load_addr.sstatus.mem_region = CVMX_IO_SEG;
        load_addr.sstatus.is_io = 1;
        load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
        load_addr.sstatus.coreid = cvmx_get_core_num();
        load_addr.sstatus.get_cur = 1;
        load_addr.sstatus.get_wqp = 1;
        load_resp.u64 = cvmx_read_csr(load_addr.u64);
        return (cvmx_wqe_t *) cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
}

#ifndef CVMX_MF_CHORD
#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
#endif

/**
 * Print a warning if a tag switch is pending for this core
 *
 * @function: Function name checking for a pending tag switch
 */
static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
{
        uint64_t switch_complete;

        CVMX_MF_CHORD(switch_complete);
        if (!switch_complete)
                pr_warning("%s called with tag switch in progress\n", function);
}

/**
 * Waits for a tag switch to complete by polling the completion bit.
 * Note that switches to NULL complete immediately and do not need
 * to be waited for.
 */
static inline void cvmx_pow_tag_sw_wait(void)
{
        const uint64_t MAX_CYCLES = 1ull << 31;
        uint64_t switch_complete;
        uint64_t start_cycle = cvmx_get_cycle();

        while (1) {
                CVMX_MF_CHORD(switch_complete);
                if (unlikely(switch_complete))
                        break;
                if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
                        pr_warning("Tag switch is taking a long time, "
                                   "possible deadlock\n");
                        start_cycle = -MAX_CYCLES - 1;
                }
        }
}

/**
 * Synchronous work request. Requests work from the POW.
 * This function does NOT wait for previous tag switches to complete,
 * so the caller must ensure that there is not a pending tag switch.
 *
 * @wait: When set, call stalls until work becomes available, or
 *        times out. If not set, returns immediately.
 *
 * Returns the WQE pointer from POW. Returns NULL if no work was
 * available.
 */
static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
                                                             wait)
{
        cvmx_pow_load_addr_t ptr;
        cvmx_pow_tag_load_resp_t result;

        if (CVMX_ENABLE_POW_CHECKS)
                __cvmx_pow_warn_if_pending_switch(__func__);

        ptr.u64 = 0;
        ptr.swork.mem_region = CVMX_IO_SEG;
        ptr.swork.is_io = 1;
        ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
        ptr.swork.wait = wait;

        result.u64 = cvmx_read_csr(ptr.u64);

        if (result.s_work.no_work)
                return NULL;
        else
                return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
}

/**
 * Synchronous work request. Requests work from the POW.
 * This function waits for any previous tag switch to complete before
 * requesting the new work.
 *
 * @wait: When set, call stalls until work becomes available, or
 *        times out. If not set, returns immediately.
 *
 * Returns the WQE pointer from POW. Returns NULL if no work was
 * available.
 */
static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
{
        if (CVMX_ENABLE_POW_CHECKS)
                __cvmx_pow_warn_if_pending_switch(__func__);

        /* Must not have a switch pending when requesting work */
        cvmx_pow_tag_sw_wait();
        return cvmx_pow_work_request_sync_nocheck(wait);
}
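
/*
 * Usage sketch (editor's addition, not part of the original header):
 * a minimal synchronous work loop built from the functions in this
 * file. process_packet() stands in for application code and is the
 * only assumed name; cvmx_pow_tag_sw_null() and
 * cvmx_pow_work_invalid() are defined later in this file.
 *
 *      for (;;) {
 *              cvmx_wqe_t *wqe = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
 *
 *              if (cvmx_pow_work_invalid(wqe))
 *                      continue;       (request timed out; try again)
 *              process_packet(wqe);
 *              cvmx_pow_tag_sw_null(); (drop ordering before reuse/free)
 *      }
 */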

/**
 * Synchronous NULL_RD request. Requests a switch out of NULL_NULL
 * POW state. This function waits for any previous tag switch to
 * complete before requesting the NULL_RD.
 *
 * Returns the POW state of type enum cvmx_pow_tag_type.
 */
static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
{
        cvmx_pow_load_addr_t ptr;
        cvmx_pow_tag_load_resp_t result;

        if (CVMX_ENABLE_POW_CHECKS)
                __cvmx_pow_warn_if_pending_switch(__func__);

        /* Must not have a switch pending when requesting work */
        cvmx_pow_tag_sw_wait();

        ptr.u64 = 0;
        ptr.snull_rd.mem_region = CVMX_IO_SEG;
        ptr.snull_rd.is_io = 1;
        ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;

        result.u64 = cvmx_read_csr(ptr.u64);

        return (enum cvmx_pow_tag_type) result.s_null_rd.state;
}

/**
 * Asynchronous work request. Work is requested from the POW unit,
 * and should later be checked with function
 * cvmx_pow_work_response_async. This function does NOT wait for
 * previous tag switches to complete, so the caller must ensure that
 * there is not a pending tag switch.
 *
 * @scr_addr: Scratch memory address that the response will be
 *            returned to, which is either a valid WQE, or a response
 *            with the invalid bit set. Byte address, must be 8 byte
 *            aligned.
 * @wait:     1 to cause response to wait for work to become available
 *            (or timeout), 0 to cause response to return immediately
 */
static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
                                                       cvmx_pow_wait_t wait)
{
        cvmx_pow_iobdma_store_t data;

        if (CVMX_ENABLE_POW_CHECKS)
                __cvmx_pow_warn_if_pending_switch(__func__);

        /* scr_addr must be 8 byte aligned */
        data.s.scraddr = scr_addr >> 3;
        data.s.len = 1;
        data.s.did = CVMX_OCT_DID_TAG_SWTAG;
        data.s.wait = wait;
        cvmx_send_single(data.u64);
}

/**
 * Asynchronous work request. Work is requested from the POW unit,
 * and should later be checked with function
 * cvmx_pow_work_response_async. This function waits for any previous
 * tag switch to complete before requesting the new work.
 *
 * @scr_addr: Scratch memory address that the response will be
 *            returned to, which is either a valid WQE, or a response
 *            with the invalid bit set. Byte address, must be 8 byte
 *            aligned.
 * @wait:     1 to cause response to wait for work to become available
 *            (or timeout), 0 to cause response to return immediately
 */
static inline void cvmx_pow_work_request_async(int scr_addr,
                                               cvmx_pow_wait_t wait)
{
        if (CVMX_ENABLE_POW_CHECKS)
                __cvmx_pow_warn_if_pending_switch(__func__);

        /* Must not have a switch pending when requesting work */
        cvmx_pow_tag_sw_wait();
        cvmx_pow_work_request_async_nocheck(scr_addr, wait);
}

/**
 * Gets the result of an asynchronous work request. Performs an
 * IOBDMA sync to wait for the response.
 *
 * @scr_addr: Scratch memory address to get the result from. Byte
 *            address, must be 8 byte aligned.
 *
 * Returns the WQE from the scratch register, or NULL if no work was
 * available.
 */
static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
{
        cvmx_pow_tag_load_resp_t result;

        CVMX_SYNCIOBDMA;
        result.u64 = cvmx_scratch_read64(scr_addr);

        if (result.s_work.no_work)
                return NULL;
        else
                return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
}
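
/*
 * Usage sketch (editor's addition): hiding work-request latency with
 * the asynchronous pair above. SCR_OFF is an assumed
 * application-chosen, 8-byte-aligned scratchpad offset, and
 * other_prep_work()/process_packet() are placeholders for unrelated
 * application processing.
 *
 *      #define SCR_OFF 0
 *
 *      cvmx_pow_work_request_async(SCR_OFF, CVMX_POW_WAIT);
 *      other_prep_work();      (runs while the POW finds work)
 *      wqe = cvmx_pow_work_response_async(SCR_OFF);
 *      if (!cvmx_pow_work_invalid(wqe))
 *              process_packet(wqe);
 */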

/**
 * Checks if a work queue entry pointer returned by a work request is
 * valid. It may be invalid due to no work being available or due to
 * a timeout.
 *
 * @wqe_ptr: pointer to a work queue entry returned by the POW
 *
 * Returns 0 if the pointer is valid, 1 if invalid (no work was
 * returned)
 */
static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
{
        return wqe_ptr == NULL;
}

/**
 * Starts a tag switch to the provided tag value and tag type.
 * Completion for the tag switch must be checked for separately. This
 * function does NOT update the work queue entry in dram to match tag
 * value and type, so the application must keep track of these if they
 * are important to the application. This tag switch command must not
 * be used for switches to NULL, as the tag switch pending bit will be
 * set by the switch request, but never cleared by the hardware.
 *
 * This function does no checks, so the caller must ensure that any
 * previous tag switch has completed.
 *
 * @tag:      new tag value
 * @tag_type: new tag type (ordered or atomic)
 */
static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
                                           enum cvmx_pow_tag_type tag_type)
{
        cvmx_addr_t ptr;
        cvmx_pow_tag_req_t tag_req;

        if (CVMX_ENABLE_POW_CHECKS) {
                cvmx_pow_tag_req_t current_tag;

                __cvmx_pow_warn_if_pending_switch(__func__);
                current_tag = cvmx_pow_get_current_tag();
                if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
                        pr_warning("%s called with NULL_NULL tag\n",
                                   __func__);
                if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
                        pr_warning("%s called with NULL tag\n", __func__);
                if ((current_tag.s.type == tag_type)
                   && (current_tag.s.tag == tag))
                        pr_warning("%s called to perform a tag switch to the "
                                   "same tag\n",
                                   __func__);
                if (tag_type == CVMX_POW_TAG_TYPE_NULL)
                        pr_warning("%s called to perform a tag switch to "
                                   "NULL. Use cvmx_pow_tag_sw_null() instead\n",
                                   __func__);
        }

        /*
         * Note that WQE in DRAM is not updated here, as the POW does
         * not read from DRAM once the WQE is in flight. See hardware
         * manual for complete details. It is the application's
         * responsibility to keep track of the current tag value if
         * that is important.
         */

        tag_req.u64 = 0;
        tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
        tag_req.s.tag = tag;
        tag_req.s.type = tag_type;

        ptr.u64 = 0;
        ptr.sio.mem_region = CVMX_IO_SEG;
        ptr.sio.is_io = 1;
        ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;

        /*
         * Once this store arrives at POW, it will attempt the switch;
         * software must wait for the switch to complete separately.
         */
        cvmx_write_io(ptr.u64, tag_req.u64);
}

/**
 * Starts a tag switch to the provided tag value and tag type.
 * Completion for the tag switch must be checked for separately. This
 * function does NOT update the work queue entry in dram to match tag
 * value and type, so the application must keep track of these if they
 * are important to the application. This tag switch command must not
 * be used for switches to NULL, as the tag switch pending bit will be
 * set by the switch request, but never cleared by the hardware.
 *
 * NOTE: This should not be used when switching from a NULL tag. Use
 * cvmx_pow_tag_sw_full() instead.
 *
 * This function waits for any previous tag switch to complete before
 * starting the new switch.
 *
 * @tag:      new tag value
 * @tag_type: new tag type (ordered or atomic)
 */
static inline void cvmx_pow_tag_sw(uint32_t tag,
                                   enum cvmx_pow_tag_type tag_type)
{
        if (CVMX_ENABLE_POW_CHECKS)
                __cvmx_pow_warn_if_pending_switch(__func__);

        /*
         * Note that WQE in DRAM is not updated here, as the POW does
         * not read from DRAM once the WQE is in flight. See hardware
         * manual for complete details. It is the application's
         * responsibility to keep track of the current tag value if
         * that is important.
         */

        /*
         * Ensure that there is not a pending tag switch, as a tag
         * switch cannot be started if a previous switch is still
         * pending.
         */
        cvmx_pow_tag_sw_wait();
        cvmx_pow_tag_sw_nocheck(tag, tag_type);
}
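
/*
 * Usage sketch (editor's addition): the common pattern for entering
 * a critical section on per-flow state. flow_tag and the shared
 * per-flow state are assumptions. The switch is started early so
 * that independent computation overlaps with the switch latency.
 *
 *      cvmx_pow_tag_sw(flow_tag, CVMX_POW_TAG_TYPE_ATOMIC);
 *      ... computation that does not touch the shared state ...
 *      cvmx_pow_tag_sw_wait();
 *      ... at most one core holds this atomic tag now, so the
 *          per-flow state can be updated without locks ...
 */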

/**
 * Starts a tag switch to the provided tag value and tag type.
 * Completion for the tag switch must be checked for separately. This
 * function does NOT update the work queue entry in dram to match tag
 * value and type, so the application must keep track of these if they
 * are important to the application. This tag switch command must not
 * be used for switches to NULL, as the tag switch pending bit will be
 * set by the switch request, but never cleared by the hardware.
 *
 * This function must be used for tag switches from NULL.
 *
 * This function does no checks, so the caller must ensure that any
 * previous tag switch has completed.
 *
 * @wqp:      pointer to the work queue entry to switch to
 * @tag:      tag value to be assigned to the work queue entry
 * @tag_type: type of tag
 * @group:    group value for the work queue entry.
 */
static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
                                                enum cvmx_pow_tag_type tag_type,
                                                uint64_t group)
{
        cvmx_addr_t ptr;
        cvmx_pow_tag_req_t tag_req;

        if (CVMX_ENABLE_POW_CHECKS) {
                cvmx_pow_tag_req_t current_tag;

                __cvmx_pow_warn_if_pending_switch(__func__);
                current_tag = cvmx_pow_get_current_tag();
                if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
                        pr_warning("%s called with NULL_NULL tag\n",
                                   __func__);
                if ((current_tag.s.type == tag_type)
                   && (current_tag.s.tag == tag))
                        pr_warning("%s called to perform a tag switch to "
                                   "the same tag\n",
                                   __func__);
                if (tag_type == CVMX_POW_TAG_TYPE_NULL)
                        pr_warning("%s called to perform a tag switch to "
                                   "NULL. Use cvmx_pow_tag_sw_null() instead\n",
                                   __func__);
                if (wqp != cvmx_phys_to_ptr(0x80))
                        if (wqp != cvmx_pow_get_current_wqp())
                                pr_warning("%s passed WQE(%p) doesn't match "
                                           "the address in the POW(%p)\n",
                                           __func__, wqp,
                                           cvmx_pow_get_current_wqp());
        }

        /*
         * Note that WQE in DRAM is not updated here, as the POW does
         * not read from DRAM once the WQE is in flight. See hardware
         * manual for complete details. It is the application's
         * responsibility to keep track of the current tag value if
         * that is important.
         */

        tag_req.u64 = 0;
        tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL;
        tag_req.s.tag = tag;
        tag_req.s.type = tag_type;
        tag_req.s.grp = group;

        ptr.u64 = 0;
        ptr.sio.mem_region = CVMX_IO_SEG;
        ptr.sio.is_io = 1;
        ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
        ptr.sio.offset = CAST64(wqp);

        /*
         * Once this store arrives at POW, it will attempt the switch;
         * software must wait for the switch to complete separately.
         */
        cvmx_write_io(ptr.u64, tag_req.u64);
}

/**
 * Starts a tag switch to the provided tag value and tag type.
 * Completion for the tag switch must be checked for separately. This
 * function does NOT update the work queue entry in dram to match tag
 * value and type, so the application must keep track of these if they
 * are important to the application. This tag switch command must not
 * be used for switches to NULL, as the tag switch pending bit will be
 * set by the switch request, but never cleared by the hardware.
 *
 * This function must be used for tag switches from NULL.
 *
 * This function waits for any pending tag switches to complete
 * before requesting the tag switch.
 *
 * @wqp:      pointer to the work queue entry to switch to
 * @tag:      tag value to be assigned to the work queue entry
 * @tag_type: type of tag
 * @group:    group value for the work queue entry.
 */
static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag,
                                        enum cvmx_pow_tag_type tag_type,
                                        uint64_t group)
{
        if (CVMX_ENABLE_POW_CHECKS)
                __cvmx_pow_warn_if_pending_switch(__func__);

        /*
         * Ensure that there is not a pending tag switch, as a tag
         * switch cannot be started if a previous switch is still
         * pending.
         */
        cvmx_pow_tag_sw_wait();
        cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
}

/**
 * Switch to a NULL tag, which ends any ordering or synchronization
 * provided by the POW for the current work queue entry. This
 * operation completes immediately, so completion should not be
 * waited for.
 *
 * This function does NOT wait for previous tag switches to complete,
 * so the caller must ensure that any previous tag switches have
 * completed.
 */
static inline void cvmx_pow_tag_sw_null_nocheck(void)
{
        cvmx_addr_t ptr;
        cvmx_pow_tag_req_t tag_req;

        if (CVMX_ENABLE_POW_CHECKS) {
                cvmx_pow_tag_req_t current_tag;

                __cvmx_pow_warn_if_pending_switch(__func__);
                current_tag = cvmx_pow_get_current_tag();
                if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
                        pr_warning("%s called with NULL_NULL tag\n",
                                   __func__);
                if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
                        pr_warning("%s called when we already have a "
                                   "NULL tag\n",
                                   __func__);
        }

        tag_req.u64 = 0;
        tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
        tag_req.s.type = CVMX_POW_TAG_TYPE_NULL;

        ptr.u64 = 0;
        ptr.sio.mem_region = CVMX_IO_SEG;
        ptr.sio.is_io = 1;
        ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;

        cvmx_write_io(ptr.u64, tag_req.u64);

        /* switch to NULL completes immediately */
}

/**
 * Switch to a NULL tag, which ends any ordering or synchronization
 * provided by the POW for the current work queue entry. This
 * operation completes immediately, so completion should not be
 * waited for.
 *
 * This function waits for any pending tag switches to complete
 * before requesting the switch to NULL.
 */
static inline void cvmx_pow_tag_sw_null(void)
{
        if (CVMX_ENABLE_POW_CHECKS)
                __cvmx_pow_warn_if_pending_switch(__func__);

        /*
         * Ensure that there is not a pending tag switch, as a tag
         * switch cannot be started if a previous switch is still
         * pending.
         */
        cvmx_pow_tag_sw_wait();
        cvmx_pow_tag_sw_null_nocheck();

        /* switch to NULL completes immediately */
}

/**
 * Submits work to an input queue. This function updates the work
 * queue entry in DRAM to match the arguments given. Note that the
 * tag provided is for the work queue entry submitted, and is
 * unrelated to the tag that the core currently holds.
 *
 * @wqp:      pointer to work queue entry to submit. This entry is
 *            updated to match the other parameters
 * @tag:      tag value to be assigned to the work queue entry
 * @tag_type: type of tag
 * @qos:      Input queue to add to.
 * @grp:      group value for the work queue entry.
 */
static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
                                        enum cvmx_pow_tag_type tag_type,
                                        uint64_t qos, uint64_t grp)
{
        cvmx_addr_t ptr;
        cvmx_pow_tag_req_t tag_req;

        wqp->qos = qos;
        wqp->tag = tag;
        wqp->tag_type = tag_type;
        wqp->grp = grp;

        tag_req.u64 = 0;
        tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
        tag_req.s.type = tag_type;
        tag_req.s.tag = tag;
        tag_req.s.qos = qos;
        tag_req.s.grp = grp;

        ptr.u64 = 0;
        ptr.sio.mem_region = CVMX_IO_SEG;
        ptr.sio.is_io = 1;
        ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
        ptr.sio.offset = cvmx_ptr_to_phys(wqp);

        /*
         * SYNC write to memory before the work submit. This is
         * necessary as POW may read values from DRAM at this time.
         */
        CVMX_SYNCWS;
        cvmx_write_io(ptr.u64, tag_req.u64);
}
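
/*
 * Usage sketch (editor's addition): injecting self-generated work.
 * get_wqe_buffer() is an assumed application allocator (e.g. backed
 * by an FPA pool); qos 0 and group 5 are arbitrary example values.
 * cvmx_pow_tag_compose() is defined later in this file.
 *
 *      cvmx_wqe_t *wqe = get_wqe_buffer();
 *      uint32_t tag = cvmx_pow_tag_compose(2, 0x1234);
 *
 *      (fill in the rest of *wqe as the application requires)
 *      cvmx_pow_work_submit(wqe, tag, CVMX_POW_TAG_TYPE_ORDERED, 0, 5);
 */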

/**
 * This function sets the group mask for a core. The group mask
 * indicates which groups each core will accept work from. There are
 * 16 groups.
 *
 * @core_num: core to apply mask to
 * @mask:     Group mask. There are 16 groups, so only bits 0-15 are
 *            valid, representing groups 0-15.
 *            Each 1 bit in the mask enables the core to accept work
 *            from the corresponding group.
 */
static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
{
        union cvmx_pow_pp_grp_mskx grp_msk;

        grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
        grp_msk.s.grp_msk = mask;
        cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
}
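
/*
 * Usage sketch (editor's addition): restrict the current core to
 * groups 0 and 1. The mask value is an example only.
 *
 *      cvmx_pow_set_group_mask(cvmx_get_core_num(),
 *                              (1 << 0) | (1 << 1));
 */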

/**
 * This function sets POW static priorities for a core. Each input
 * queue has an associated priority value.
 *
 * @core_num: core to apply priorities to
 * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
 *            Highest priority is 0 and lowest is 7. A priority value
 *            of 0xF instructs POW to skip the Input Queue when
 *            scheduling to this specific core.
 *            NOTE: priorities should not have gaps in values, meaning
 *            {0,1,1,1,1,1,1,1} is a valid configuration while
 *            {0,2,2,2,2,2,2,2} is not.
 */
static inline void cvmx_pow_set_priority(uint64_t core_num,
                                         const uint8_t priority[])
{
        /* POW priorities are supported on CN5xxx and later */
        if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
                union cvmx_pow_pp_grp_mskx grp_msk;

                grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
                grp_msk.s.qos0_pri = priority[0];
                grp_msk.s.qos1_pri = priority[1];
                grp_msk.s.qos2_pri = priority[2];
                grp_msk.s.qos3_pri = priority[3];
                grp_msk.s.qos4_pri = priority[4];
                grp_msk.s.qos5_pri = priority[5];
                grp_msk.s.qos6_pri = priority[6];
                grp_msk.s.qos7_pri = priority[7];

                /* Detect gaps between priorities and flag error */
                {
                        int i;
                        uint32_t prio_mask = 0;

                        for (i = 0; i < 8; i++)
                                if (priority[i] != 0xF)
                                        prio_mask |= 1 << priority[i];

                        if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
                                pr_err("POW static priorities should be "
                                       "contiguous (0x%llx)\n",
                                       (unsigned long long)prio_mask);
                                return;
                        }
                }

                cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
        }
}
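
/*
 * Usage sketch (editor's addition): give input queue 0 strict
 * preference over the rest on this core, using the gap-free rule
 * described above ({0,1,1,1,1,1,1,1} is valid, {0,2,...} is not).
 *
 *      const uint8_t prio[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };
 *
 *      cvmx_pow_set_priority(cvmx_get_core_num(), prio);
 */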

/**
 * Performs a tag switch and an immediate deschedule of the current
 * work queue entry. This completes immediately, so completion must
 * not be waited for. This function does NOT update the work queue
 * entry in DRAM to match the arguments.
 *
 * This function does NOT wait for any prior tag switches to
 * complete, so the calling code must do this.
 *
 * @tag:      New tag value
 * @tag_type: New tag type
 * @group:    New group value
 * @no_sched: Control whether this work queue entry will be rescheduled.
 *            - 1 : don't schedule this work
 *            - 0 : allow this work to be scheduled.
 */
static inline void cvmx_pow_tag_sw_desched_nocheck(
        uint32_t tag,
        enum cvmx_pow_tag_type tag_type,
        uint64_t group,
        uint64_t no_sched)
{
        cvmx_addr_t ptr;
        cvmx_pow_tag_req_t tag_req;

        if (CVMX_ENABLE_POW_CHECKS) {
                cvmx_pow_tag_req_t current_tag;

                __cvmx_pow_warn_if_pending_switch(__func__);
                current_tag = cvmx_pow_get_current_tag();
                if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
                        pr_warning("%s called with NULL_NULL tag\n",
                                   __func__);
                if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
                        pr_warning("%s called with NULL tag. Deschedule not "
                                   "allowed from NULL state\n",
                                   __func__);
                if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
                        && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
                        pr_warning("%s called where neither the before nor "
                                   "the after tag is ATOMIC\n",
                                   __func__);
        }

        tag_req.u64 = 0;
        tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
        tag_req.s.tag = tag;
        tag_req.s.type = tag_type;
        tag_req.s.grp = group;
        tag_req.s.no_sched = no_sched;

        ptr.u64 = 0;
        ptr.sio.mem_region = CVMX_IO_SEG;
        ptr.sio.is_io = 1;
        ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
        /*
         * since TAG3 is used, this store will clear the local pending
         * switch bit.
         */
        cvmx_write_io(ptr.u64, tag_req.u64);
}

/**
 * Performs a tag switch and an immediate deschedule of the current
 * work queue entry. This completes immediately, so completion must
 * not be waited for. This function does NOT update the work queue
 * entry in DRAM to match the arguments.
 *
 * This function waits for any prior tag switches to complete before
 * requesting the switch/deschedule.
 *
 * @tag:      New tag value
 * @tag_type: New tag type
 * @group:    New group value
 * @no_sched: Control whether this work queue entry will be rescheduled.
 *            - 1 : don't schedule this work
 *            - 0 : allow this work to be scheduled.
 */
static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
                                           enum cvmx_pow_tag_type tag_type,
                                           uint64_t group, uint64_t no_sched)
{
        if (CVMX_ENABLE_POW_CHECKS)
                __cvmx_pow_warn_if_pending_switch(__func__);

        /* Need to make sure any writes to the work queue entry are complete */
        CVMX_SYNCWS;
        /*
         * Ensure that there is not a pending tag switch, as a tag
         * switch cannot be started if a previous switch is still
         * pending.
         */
        cvmx_pow_tag_sw_wait();
        cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
}
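
/*
 * Usage sketch (editor's addition): parking a flow that is blocked
 * on an external event. flow_tag and the group value are
 * assumptions. The entry keeps the new tag and is rescheduled to a
 * core accepting group 0 when it becomes schedulable again; passing
 * no_sched = 1 instead would hold it until a CLR_NSCHED operation.
 *
 *      cvmx_pow_tag_sw_desched(flow_tag, CVMX_POW_TAG_TYPE_ATOMIC, 0, 0);
 */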

/**
 * Deschedules the current work queue entry.
 *
 * @no_sched: no schedule flag value to be set on the work queue
 *            entry. If this is set the entry will not be rescheduled.
 */
static inline void cvmx_pow_desched(uint64_t no_sched)
{
        cvmx_addr_t ptr;
        cvmx_pow_tag_req_t tag_req;

        if (CVMX_ENABLE_POW_CHECKS) {
                cvmx_pow_tag_req_t current_tag;

                __cvmx_pow_warn_if_pending_switch(__func__);
                current_tag = cvmx_pow_get_current_tag();
                if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
                        pr_warning("%s called with NULL_NULL tag\n",
                                   __func__);
                if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
                        pr_warning("%s called with NULL tag. Deschedule not "
                                   "expected from NULL state\n",
                                   __func__);
        }

        /* Need to make sure any writes to the work queue entry are complete */
        CVMX_SYNCWS;

        tag_req.u64 = 0;
        tag_req.s.op = CVMX_POW_TAG_OP_DESCH;
        tag_req.s.no_sched = no_sched;

        ptr.u64 = 0;
        ptr.sio.mem_region = CVMX_IO_SEG;
        ptr.sio.is_io = 1;
        ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
        /*
         * since TAG3 is used, this store will clear the local pending
         * switch bit.
         */
        cvmx_write_io(ptr.u64, tag_req.u64);
}
1860 
1861 /****************************************************
1862 * Define usage of bits within the 32 bit tag values.
1863 *****************************************************/
1864 
1865 /*
1866  * Number of bits of the tag used by software. The SW bits are always
1867  * a contiguous block of the high starting at bit 31. The hardware
1868  * bits are always the low bits. By default, the top 8 bits of the
1869  * tag are reserved for software, and the low 24 are set by the IPD
1870  * unit.
1871  */
1872 #define CVMX_TAG_SW_BITS (8)
1873 #define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
1874 
1875 /* Below is the list of values for the top 8 bits of the tag. */
1876 /*
1877  * Tag values with top byte of this value are reserved for internal
1878  * executive uses.
1879  */
1880 #define CVMX_TAG_SW_BITS_INTERNAL 0x1
1881 /* The executive divides the remaining 24 bits as follows:
1882  * - the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
1883  *
1884  * - the lower 16 bits (bits 15 - 0 of the tag) define are the value
1885  * with the subgroup
1886  *
1887  * Note that this section describes the format of tags generated by
1888  * software - refer to the hardware documentation for a description of
1889  * the tags values generated by the packet input hardware. Subgroups
1890  * are defined here.
1891  */
1892 /* Mask for the value portion of the tag */
1893 #define CVMX_TAG_SUBGROUP_MASK 0xFFFF
1894 #define CVMX_TAG_SUBGROUP_SHIFT 16
1895 #define CVMX_TAG_SUBGROUP_PKO 0x1
1896 
1897 /* End of executive tag subgroup definitions */
1898 
1899 /*
1900  * The remaining values software bit values 0x2 - 0xff are available
1901  * for application use.
1902  */
1903 
/**
 * Build a full 32 bit tag from the software and hardware parts
 * defined above.
 *
 * @sw_bits: bits for the most significant CVMX_TAG_SW_BITS of the tag
 * @hw_bits: bits for the remaining low bits of the tag
 *
 * Returns the combined 32 bit tag.
 */
static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
{
        return ((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) <<
                CVMX_TAG_SW_SHIFT) |
               (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
}

/**
 * Extract the bits of the tag that are reserved for software use
 * (the high CVMX_TAG_SW_BITS bits).
 *
 * @tag: 32 bit tag value
 *
 * Returns the software bits of the tag.
 */
static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
{
        return (tag >> (32 - CVMX_TAG_SW_BITS)) &
               cvmx_build_mask(CVMX_TAG_SW_BITS);
}

/**
 * Extract the bits of the tag set by the hardware (the low
 * 32 - CVMX_TAG_SW_BITS bits).
 *
 * @tag: 32 bit tag value
 *
 * Returns the hardware bits of the tag.
 */
static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
{
        return tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS);
}
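
/*
 * Worked example (editor's addition, not part of the original
 * header; the specific tag values are hypothetical). With the
 * default CVMX_TAG_SW_BITS of 8, composing an internal-executive tag
 * for the PKO subgroup with value 0x0042 gives:
 *
 *      uint32_t tag = cvmx_pow_tag_compose(CVMX_TAG_SW_BITS_INTERNAL,
 *              (CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT) |
 *              (0x0042 & CVMX_TAG_SUBGROUP_MASK));
 *
 *      tag                            == 0x01010042
 *      cvmx_pow_tag_get_sw_bits(tag)  == 0x01
 *      cvmx_pow_tag_get_hw_bits(tag)  == 0x010042
 */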

/**
 * Store the current POW internal state into the supplied buffer. It
 * is recommended that you pass a buffer of at least 128KB. The
 * format of the capture may change based on SDK version and Octeon
 * chip.
 *
 * @buffer:      Buffer to store capture into
 * @buffer_size: The size of the supplied buffer
 *
 * Returns zero on success, negative on failure
 */
extern int cvmx_pow_capture(void *buffer, int buffer_size);

/**
 * Dump a POW capture to the console in a human readable format.
 *
 * @buffer:      POW capture from cvmx_pow_capture()
 * @buffer_size: Size of the buffer
 */
extern void cvmx_pow_display(void *buffer, int buffer_size);

/**
 * Return the number of POW entries supported by this chip
 *
 * Returns the number of POW entries
 */
extern int cvmx_pow_get_num_entries(void);

#endif /* __CVMX_POW_H__ */