Linux Kernel 3.7.1
mpipe.h
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/* Machine-generated file; do not edit. */

#ifndef __ARCH_MPIPE_H__
#define __ARCH_MPIPE_H__

#include <arch/abi.h>
#include <arch/mpipe_def.h>

#ifndef __ASSEMBLER__

/*
 * MMIO Ingress DMA Release Region Address.
 * This is a description of the physical addresses used to manipulate ingress
 * credit counters. Accesses to this address space should use an address of
 * this form and a value like that specified in IDMA_RELEASE_REGION_VAL.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Reserved. */
    uint_reg_t __reserved_0  : 3;
    /* NotifRing to be released */
    uint_reg_t ring          : 8;
    /* Bucket to be released */
    uint_reg_t bucket        : 13;
    /* Enable NotifRing release */
    uint_reg_t ring_enable   : 1;
    /* Enable Bucket release */
    uint_reg_t bucket_enable : 1;
    /*
     * This field of the address selects the region (address space) to be
     * accessed. For the iDMA release region, this field must be 4.
     */
    uint_reg_t region        : 3;
    /* Reserved. */
    uint_reg_t __reserved_1  : 6;
    /* This field of the address indexes the 32 entry service domain table. */
    uint_reg_t svc_dom       : 5;
    /* Reserved. */
    uint_reg_t __reserved_2  : 24;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_2  : 24;
    uint_reg_t svc_dom       : 5;
    uint_reg_t __reserved_1  : 6;
    uint_reg_t region        : 3;
    uint_reg_t bucket_enable : 1;
    uint_reg_t ring_enable   : 1;
    uint_reg_t bucket        : 13;
    uint_reg_t ring          : 8;
    uint_reg_t __reserved_0  : 3;
#endif
  };

  uint_reg_t word;
} MPIPE_IDMA_RELEASE_REGION_ADDR_t;

/*
 * MMIO Ingress DMA Release Region Value - Release NotifRing and/or Bucket.
 * Provides release of the associated NotifRing. The address of the MMIO
 * operation is described in IDMA_RELEASE_REGION_ADDR.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /*
     * Number of packets being released. The load balancer's count of
     * inflight packets will be decremented by this amount for the associated
     * Bucket and/or NotifRing
     */
    uint_reg_t count      : 16;
    /* Reserved. */
    uint_reg_t __reserved : 48;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved : 48;
    uint_reg_t count      : 16;
#endif
  };

  uint_reg_t word;
} MPIPE_IDMA_RELEASE_REGION_VAL_t;

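/*
 * Illustrative usage sketch (editorial addition, not part of the
 * machine-generated register description above): releasing ingress DMA
 * credits by storing an IDMA_RELEASE_REGION_VAL to an address built from
 * IDMA_RELEASE_REGION_ADDR. The 'mpipe_mmio_base' parameter and the raw
 * pointer store are hypothetical stand-ins; real drivers go through the
 * gxio/mpipe library rather than hand-rolled MMIO accesses.
 */
static inline void
example_idma_release(unsigned char *mpipe_mmio_base, unsigned int ring,
                     unsigned int bucket, unsigned int npackets)
{
  MPIPE_IDMA_RELEASE_REGION_ADDR_t addr = { .word = 0 };
  MPIPE_IDMA_RELEASE_REGION_VAL_t val = { .word = 0 };

  addr.region = 4;            /* iDMA release region (see comment above). */
  addr.ring = ring;           /* NotifRing whose credits are returned. */
  addr.ring_enable = 1;
  addr.bucket = bucket;       /* Bucket whose inflight count is decremented. */
  addr.bucket_enable = 1;

  val.count = npackets;       /* Packets released by this single store. */

  /* One 64-bit MMIO store performs the release. */
  *(volatile uint_reg_t *)(mpipe_mmio_base + addr.word) = val.word;
}
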
/*
 * MMIO Buffer Stack Manager Region Address.
 * This MMIO region is used for posting or fetching buffers to/from the
 * buffer stack manager. On an MMIO load, this pops a buffer descriptor from
 * the top of stack if one is available. On an MMIO store, this pushes a
 * buffer to the stack. The value read or written is described in
 * BSM_REGION_VAL.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Reserved. */
    uint_reg_t __reserved_0 : 3;
    /* BufferStack being accessed. */
    uint_reg_t stack        : 5;
    /* Reserved. */
    uint_reg_t __reserved_1 : 18;
    /*
     * This field of the address selects the region (address space) to be
     * accessed. For the buffer stack manager region, this field must be 6.
     */
    uint_reg_t region       : 3;
    /* Reserved. */
    uint_reg_t __reserved_2 : 6;
    /* This field of the address indexes the 32 entry service domain table. */
    uint_reg_t svc_dom      : 5;
    /* Reserved. */
    uint_reg_t __reserved_3 : 24;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_3 : 24;
    uint_reg_t svc_dom      : 5;
    uint_reg_t __reserved_2 : 6;
    uint_reg_t region       : 3;
    uint_reg_t __reserved_1 : 18;
    uint_reg_t stack        : 5;
    uint_reg_t __reserved_0 : 3;
#endif
  };

  uint_reg_t word;
} MPIPE_BSM_REGION_ADDR_t;

/*
 * MMIO Buffer Stack Manager Region Value.
 * This MMIO region is used for posting or fetching buffers to/from the
 * buffer stack manager. On an MMIO load, this pops a buffer descriptor from
 * the top of stack if one is available. On an MMIO store, this pushes a
 * buffer to the stack. The address of the MMIO operation is described in
 * BSM_REGION_ADDR.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Reserved. */
    uint_reg_t __reserved_0 : 7;
    /*
     * Base virtual address of the buffer. Must be sign extended by consumer.
     */
    int_reg_t va           : 35;
    /* Reserved. */
    uint_reg_t __reserved_1 : 6;
    /*
     * Index of the buffer stack to which this buffer belongs. Ignored on
     * writes since the offset bits specify the stack being accessed.
     */
    uint_reg_t stack_idx    : 5;
    /* Reserved. */
    uint_reg_t __reserved_2 : 5;
    /*
     * Reads as one to indicate that this is a hardware managed buffer.
     * Ignored on writes since all buffers on a given stack are the same size.
     */
    uint_reg_t hwb          : 1;
    /*
     * Encoded size of buffer (ignored on writes):
     * 0 = 128 bytes
     * 1 = 256 bytes
     * 2 = 512 bytes
     * 3 = 1024 bytes
     * 4 = 1664 bytes
     * 5 = 4096 bytes
     * 6 = 10368 bytes
     * 7 = 16384 bytes
     */
    uint_reg_t size         : 3;
    /*
     * Valid indication for the buffer. Ignored on writes.
     * 0 : Valid buffer descriptor popped from stack.
     * 3 : Could not pop a buffer from the stack. Either the stack is empty,
     *     or the hardware's prefetch buffer is empty for this stack.
     */
    uint_reg_t c            : 2;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t c            : 2;
    uint_reg_t size         : 3;
    uint_reg_t hwb          : 1;
    uint_reg_t __reserved_2 : 5;
    uint_reg_t stack_idx    : 5;
    uint_reg_t __reserved_1 : 6;
    int_reg_t va           : 35;
    uint_reg_t __reserved_0 : 7;
#endif
  };

  uint_reg_t word;
} MPIPE_BSM_REGION_VAL_t;

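/*
 * Illustrative usage sketch (editorial addition, not part of the
 * machine-generated register description above): popping and pushing a
 * buffer through the buffer stack manager region. The 'mpipe_mmio_base'
 * parameter and the raw MMIO accesses are hypothetical stand-ins; real
 * drivers use the gxio/mpipe library.
 */
static inline void *
example_bsm_pop_buffer(unsigned char *mpipe_mmio_base, unsigned int stack)
{
  MPIPE_BSM_REGION_ADDR_t addr = { .word = 0 };
  MPIPE_BSM_REGION_VAL_t val;

  addr.region = 6;                  /* Buffer stack manager region. */
  addr.stack = stack;               /* BufferStack being accessed. */

  /* An MMIO load pops a buffer descriptor if one is available. */
  val.word = *(volatile uint_reg_t *)(mpipe_mmio_base + addr.word);

  if (val.c != 0)
    return (void *)0;               /* Stack (or its prefetch buffer) empty. */

  /* The 35-bit 'va' field holds VA >> 7; sign extend and shift it back. */
  return (void *)((unsigned long)(long)val.va << 7);
}

static inline void
example_bsm_push_buffer(unsigned char *mpipe_mmio_base, unsigned int stack,
                        void *buffer)
{
  MPIPE_BSM_REGION_ADDR_t addr = { .word = 0 };
  MPIPE_BSM_REGION_VAL_t val = { .word = 0 };

  addr.region = 6;
  addr.stack = stack;
  val.va = (unsigned long)buffer >> 7;   /* Buffers are 128-byte aligned. */

  /* An MMIO store pushes the buffer onto the stack. */
  *(volatile uint_reg_t *)(mpipe_mmio_base + addr.word) = val.word;
}
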
/*
 * MMIO Egress DMA Post Region Address.
 * Used to post descriptor locations to the eDMA descriptor engine. The
 * value to be written is described in EDMA_POST_REGION_VAL.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Reserved. */
    uint_reg_t __reserved_0 : 3;
    /* eDMA ring being accessed */
    uint_reg_t ring         : 5;
    /* Reserved. */
    uint_reg_t __reserved_1 : 18;
    /*
     * This field of the address selects the region (address space) to be
     * accessed. For the egress DMA post region, this field must be 5.
     */
    uint_reg_t region       : 3;
    /* Reserved. */
    uint_reg_t __reserved_2 : 6;
    /* This field of the address indexes the 32 entry service domain table. */
    uint_reg_t svc_dom      : 5;
    /* Reserved. */
    uint_reg_t __reserved_3 : 24;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_3 : 24;
    uint_reg_t svc_dom      : 5;
    uint_reg_t __reserved_2 : 6;
    uint_reg_t region       : 3;
    uint_reg_t __reserved_1 : 18;
    uint_reg_t ring         : 5;
    uint_reg_t __reserved_0 : 3;
#endif
  };

  uint_reg_t word;
} MPIPE_EDMA_POST_REGION_ADDR_t;

/*
 * MMIO Egress DMA Post Region Value.
 * Used to post descriptor locations to the eDMA descriptor engine. The
 * address is described in EDMA_POST_REGION_ADDR.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /*
     * For writes, this specifies the current ring tail pointer prior to any
     * post. For example, to post 1 or more descriptors starting at location
     * 23, this would contain 23 (not 24). On writes, this index must be
     * masked based on the ring size. The new tail pointer after this post
     * is COUNT+RING_IDX (masked by the ring size).
     *
     * For reads, this provides the hardware descriptor fetcher's head
     * pointer. The descriptors prior to the head pointer, however, may not
     * yet have been processed so this indicator is only used to determine
     * how full the ring is and if software may post more descriptors.
     */
    uint_reg_t ring_idx   : 16;
    /*
     * For writes, this specifies the number of contiguous descriptors that
     * are being posted. Software may post up to RingSize descriptors with a
     * single MMIO store. A zero in this field on a write will "wake up" an
     * eDMA ring and cause it to fetch descriptors regardless of the
     * hardware's current view of the state of the tail pointer.
     *
     * For reads, this field provides a rolling count of the number of
     * descriptors that have been completely processed. This may be used by
     * software to determine when buffers associated with a descriptor may be
     * returned or reused. When the ring's flush bit is cleared by software
     * (after having been set by HW or SW), the COUNT will be cleared.
     */
    uint_reg_t count      : 16;
    /*
     * For writes, this specifies the generation number of the tail being
     * posted. Note that if tail+cnt wraps to the beginning of the ring, the
     * eDMA hardware assumes that the descriptors posted at the beginning of
     * the ring are also valid so it is okay to post around the wrap point.
     *
     * For reads, this is the current generation number. Valid descriptors
     * will have the inverse of this generation number.
     */
    uint_reg_t gen        : 1;
    /* Reserved. */
    uint_reg_t __reserved : 31;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved : 31;
    uint_reg_t gen        : 1;
    uint_reg_t count      : 16;
    uint_reg_t ring_idx   : 16;
#endif
  };

  uint_reg_t word;
} MPIPE_EDMA_POST_REGION_VAL_t;

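/*
 * Illustrative usage sketch (editorial addition, not part of the
 * machine-generated register description above): telling the eDMA
 * descriptor fetcher that 'count' new descriptors start at ring index
 * 'tail' (with generation 'gen') in eDMA ring 'ering'. The
 * 'mpipe_mmio_base' parameter and the raw MMIO store are hypothetical
 * stand-ins; real drivers use the gxio/mpipe library.
 */
static inline void
example_edma_post(unsigned char *mpipe_mmio_base, unsigned int ering,
                  unsigned int tail, unsigned int count, unsigned int gen)
{
  MPIPE_EDMA_POST_REGION_ADDR_t addr = { .word = 0 };
  MPIPE_EDMA_POST_REGION_VAL_t val = { .word = 0 };

  addr.region = 5;        /* Egress DMA post region. */
  addr.ring = ering;      /* eDMA ring being accessed. */

  val.ring_idx = tail;    /* Tail pointer before this post, masked to ring size. */
  val.count = count;      /* Number of contiguous descriptors being posted. */
  val.gen = gen;          /* Generation number of the posted tail. */

  /* One MMIO store posts the whole batch of descriptors. */
  *(volatile uint_reg_t *)(mpipe_mmio_base + addr.word) = val.word;
}
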
/*
 * Load Balancer Bucket Status Data.
 * Read/Write data for load balancer Bucket-Status Table. 4160 entries
 * indexed by LBL_INIT_CTL.IDX when LBL_INIT_CTL.STRUCT_SEL is BSTS_TBL
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* NotifRing currently assigned to this bucket. */
    uint_reg_t notifring  : 8;
    /* Current reference count. */
    uint_reg_t count      : 16;
    /* Group associated with this bucket. */
    uint_reg_t group      : 5;
    /* Mode select for this bucket. */
    uint_reg_t mode       : 3;
    /* Reserved. */
    uint_reg_t __reserved : 32;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved : 32;
    uint_reg_t mode       : 3;
    uint_reg_t group      : 5;
    uint_reg_t count      : 16;
    uint_reg_t notifring  : 8;
#endif
  };

  uint_reg_t word;
} MPIPE_LBL_INIT_DAT_BSTS_TBL_t;

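/*
 * Illustrative usage sketch (editorial addition): decoding a raw
 * Bucket-Status Table entry read back through the LBL_INIT_CTL indexing
 * described in the comment above.
 */
static inline unsigned int
example_bucket_inflight_count(uint_reg_t raw)
{
  MPIPE_LBL_INIT_DAT_BSTS_TBL_t bsts = { .word = raw };

  /* 'count' is the load balancer's current reference (inflight) count. */
  return bsts.count;
}
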
#endif /* !defined(__ASSEMBLER__) */

#endif /* !defined(__ARCH_MPIPE_H__) */