Linux Kernel 3.7.1
cvmx-cmd-queue.c
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Support functions for managing command queues used for
 * various hardware blocks.
 */

#include <linux/kernel.h>

#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>

#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>

/**
 * This application uses this pointer to access the global queue
 * state. It points to a bootmem named block.
 */
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);

/**
 * Initialize the Global queue state pointer.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	extern uint64_t octeon_reserve32_memory;
#endif

	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
#endif
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					     128,
					     alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
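
/*
 * Illustrative sketch (not part of the original file): the queue state
 * lives in the "cvmx_cmd_queues" named bootmem block, so another image
 * sharing the chip (e.g. a simple-executive application on other cores)
 * can attach to the same state using the lookup half of the logic
 * above. The function name below is hypothetical.
 */
#if 0
static __cvmx_cmd_queue_all_state_t *example_attach_to_cmd_queues(void)
{
	struct cvmx_bootmem_named_block_desc *desc =
	    cvmx_bootmem_find_named_block("cvmx_cmd_queues");
	/* NULL means nobody has run __cvmx_cmd_queue_init_state_ptr() yet */
	return desc ? cvmx_phys_to_ptr(desc->base_addr) : NULL;
}
#endif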

/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @queue_id:  Hardware command queue to initialize.
 * @max_depth: Maximum outstanding commands that can be queued.
 * @fpa_pool:  FPA pool the command queues should come from.
 * @pool_size: Size of each buffer in the FPA pool (bytes)
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;

	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * We artificially limit max_depth to 1<<20 words. It is an
	 * arbitrary limit.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* See if someone else has already initialized the queue */
	if (qstate->base_ptr_div128) {
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "max_depth (%d).\n",
				     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool (%u).\n",
				     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool size (%u).\n",
				     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;

		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}

		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * We zeroed the now serving field so we need to also
		 * zero the ticket.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
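
/*
 * Usage sketch (not part of the original file): bringing up the PKO
 * command queue for hardware queue 0. The FPA pool number and buffer
 * size are illustrative assumptions; real values come from the board's
 * FPA configuration (e.g. CVMX_FPA_OUTPUT_BUFFER_POOL). A max_depth of
 * 0 is safe whether or not depth checking is compiled in.
 */
#if 0
static int example_init_pko_queue(void)
{
	cvmx_cmd_queue_result_t rc;

	rc = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0),
				       0 /* max_depth: no limit */,
				       3 /* fpa_pool (assumed) */,
				       1024 /* pool_size in bytes */);
	/* A second initializer with identical parameters is tolerated */
	return (rc == CVMX_CMD_QUEUE_SUCCESS ||
		rc == CVMX_CMD_QUEUE_ALREADY_SETUP) ? 0 : -1;
}
#endif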

/**
 * Shutdown a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr == NULL) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
			     "get queue information.\n");
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	if (cvmx_cmd_queue_length(queue_id) > 0) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
			     "has data in it.\n");
		return CVMX_CMD_QUEUE_FULL;
	}

	__cvmx_cmd_queue_lock(queue_id, qptr);
	if (qptr->base_ptr_div128) {
		cvmx_fpa_free(cvmx_phys_to_ptr
			      ((uint64_t) qptr->base_ptr_div128 << 7),
			      qptr->fpa_pool, 0);
		qptr->base_ptr_div128 = 0;
	}
	__cvmx_cmd_queue_unlock(qptr);

	return CVMX_CMD_QUEUE_SUCCESS;
}
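
/*
 * Usage sketch (not part of the original file): a teardown pattern.
 * The caller must stop feeding the hardware first; here we merely poll
 * until the doorbell count drains before shutting the queue down. The
 * queue id and retry bound are illustrative.
 */
#if 0
static int example_teardown_pko_queue(void)
{
	int retries = 1000000;

	/* Wait for pending command words to be consumed by hardware */
	while (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(0)) > 0 && --retries)
		;
	return (cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(0)) ==
		CVMX_CMD_QUEUE_SUCCESS) ? 0 : -1;
}
#endif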

/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are here.
	 */
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * FIXME: Need atomic lock on
		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
		 * called with the queue lock, so that is a SLIGHT
		 * amount of protection.
		 */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_mem_debug9 debug9;
			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return debug9.cn38xx.doorbell;
		} else {
			union cvmx_pko_mem_debug8 debug8;
			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return debug8.cn58xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* FIXME: Implement other lengths */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE:
		{
			union cvmx_npei_dmax_counts dmax_counts;
			dmax_counts.u64 =
			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
					  (queue_id & 0x7));
			return dmax_counts.s.dbell;
		}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}
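
/*
 * Illustrative sketch (not part of the original file): the switch
 * above dispatches on bits <23:16> of the queue id, which encode the
 * hardware unit (PKO, ZIP, DFA, RAID, DMA), while the low bits select
 * the unit's sub-queue. A caller could classify an id the same way;
 * the helper below is hypothetical.
 */
#if 0
static const char *example_queue_unit_name(cvmx_cmd_queue_id_t queue_id)
{
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		return "PKO";
	case CVMX_CMD_QUEUE_ZIP:
		return "ZIP";
	case CVMX_CMD_QUEUE_DFA:
		return "DFA";
	case CVMX_CMD_QUEUE_RAID:
		return "RAID";
	case CVMX_CMD_QUEUE_DMA_BASE:
		return "DMA";
	default:
		return "unknown";
	}
}
#endif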

/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routine access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr && qptr->base_ptr_div128)
		return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
	else
		return NULL;
}
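
/*
 * Illustrative note (not part of the original file): base_ptr_div128
 * stores the buffer's physical address divided by 128 so it fits in a
 * narrow bitfield; the "<< 7" above multiplies it back. This only
 * round-trips because FPA buffers are 128-byte aligned, which a quick
 * sanity check could confirm; the helper below is hypothetical.
 */
#if 0
static void example_check_buffer_alignment(cvmx_cmd_queue_id_t queue_id)
{
	void *buf = cvmx_cmd_queue_buffer(queue_id);
	if (buf && (cvmx_ptr_to_phys(buf) & 127))
		cvmx_dprintf("command buffer is not 128-byte aligned?\n");
}
#endif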