dpcsup.c
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is:
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/semaphore.h>

#include "aacraid.h"

/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all QE there are and wake up all the waiters before exiting. We will
 *	take a spinlock out on the queue before operating on it.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;
	unsigned long flags, mflags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
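		/*
		 * The response queue entry packs two things into one word:
		 * bit 0 flags a fast response, and the upper bits carry the
		 * sender's fib address, i.e. the fib table index shifted
		 * left by two.
		 */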
		int fast;
		u32 index = le32_to_cpu(entry->addr);
		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		dev->queues->queue[AdapNormCmdQueue].numpending--;

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);

		if (fast) {
			/*
			 *	Doctor the fib
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			FIB_COUNTER_INCREMENT(aac_config.FastResponses);
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

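		/*
		 * NuFileSystem commands put extra state in the upper half
		 * of the status word; if any of it is set, fold the reply
		 * back to a plain ST_OK.
		 */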
		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			__le32 *pstatus = (__le32 *)hwfib->data;
			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
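		/*
		 * Async fibs and fibs that expect no response are finished
		 * off through their callback; anything else has a thread
		 * sleeping in aac_fib_send() waiting on the fib's semaphore.
		 */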
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	    call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				up(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);

			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);

			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
			/*
			 * done == 2 means the waiter was interrupted and has
			 * abandoned the fib; it is up to us to release it.
			 */
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

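	/*
	 * Bookkeeping: remember the largest batch of fibs handled in one
	 * pass, and count the passes that found the queue empty.
	 */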
	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}


/**
 *	aac_command_normal	-	Handle command replies
 *	@q: queue to read from
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all QE there are and wake up all the waiters before exiting.
 *	We will take a spinlock out on the queue before operating on it.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);

	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];

		/*
		 *	Allocate a FIB at all costs. For non queued stuff
		 *	we can just use the stack so we are happy. We need
		 *	a fib object in order to manage the linked lists
		 */
		if (dev->aif_thread)
			if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

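		/*
		 * A heap-allocated fib is handed to the AIF thread via the
		 * command queue; if we are stuck with the stack fib (no AIF
		 * thread, or kmalloc failed) the command is acknowledged
		 * right here instead.
		 */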
		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}

/*
 *
 *	aac_aif_callback
 *	@context: the context set in the fib - here it is the fib itself
 *	@fibptr: pointer to the fib
 *
 *	Handles the AIFs - new method (SRC)
 *
 */

static void aac_aif_callback(void *context, struct fib * fibptr)
{
	struct fib *fibctx;
	struct aac_dev *dev;
	struct aac_aifcmd *cmd;
	int status;

	fibctx = (struct fib *)context;
	BUG_ON(fibptr == NULL);
	dev = fibptr->dev;

	if (fibptr->hw_fib_va->header.XferState &
	    cpu_to_le32(NoMoreAifDataAvailable)) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
		return;
	}

	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

	aac_fib_init(fibctx);
	cmd = (struct aac_aifcmd *) fib_data(fibctx);
	cmd->command = cpu_to_le32(AifReqEvent);

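	/*
	 * Re-arm the mechanism: send the adapter a fresh AifRequest fib so
	 * it always has somewhere to post the next event, with this
	 * routine as the completion callback.
	 */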
	status = aac_fib_send(AifRequest,
			fibctx,
			sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
			FsaNormal,
			0, 1,
			(fib_callback)aac_aif_callback, fibctx);
}


/**
 *	aac_intr_normal	-	Handle command replies
 *	@dev: Device
 *	@index: completion reference
 *	@isAif: AIF flavour (0 = command response, 1 = common AIF, 2 = new SRC AIF)
 *	@isFastResponse: response carries no status payload
 *	@aif_fib: AIF fib supplied directly by the adapter, if any
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all QE there are and wake up all the waiters before exiting.
 */

unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
			int isAif, int isFastResponse, struct hw_fib *aif_fib)
{
	unsigned long mflags;
	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
	if (isAif == 1) {	/* AIF - common */
		struct hw_fib * hw_fib;
		struct fib * fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;

		/*
		 *	Allocate a FIB. For non queued stuff we can just use
		 *	the stack so we are happy. We need a fib object in order to
		 *	manage the linked lists.
		 */
		if ((!dev->aif_thread)
		 || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
			kfree (fib);
			return 1;
		}
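		/*
		 * Newer (SRC) adapters pass the AIF contents in directly;
		 * on older boards the AIF sits at an offset into the
		 * adapter's mapped register space, so copy it out of there.
		 */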
		if (aif_fib != NULL) {
			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
		} else {
			memcpy(hw_fib,
				(struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
				index), sizeof(struct hw_fib));
		}
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else if (isAif == 2) {	/* AIF - new (SRC) */
		struct fib *fibctx;
		struct aac_aifcmd *cmd;

		fibctx = aac_fib_alloc(dev);
		if (!fibctx)
			return 1;
		aac_fib_init(fibctx);

		cmd = (struct aac_aifcmd *) fib_data(fibctx);
		cmd->command = cpu_to_le32(AifReqEvent);

		return aac_fib_send(AifRequest,
			fibctx,
			sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
			FsaNormal,
			0, 1,
			(fib_callback)aac_aif_callback, fibctx);
	} else {
		struct fib *fib = &dev->fibs[index];
		struct hw_fib * hwfib = fib->hw_fib_va;

		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		dev->queues->queue[AdapNormCmdQueue].numpending--;

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}
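
		/*
		 * A fast response means the adapter did not write status
		 * back into the fib, so doctor in ST_OK and mark the fib
		 * processed on its behalf.
		 */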
		if (isFastResponse) {
			/*
			 *	Doctor the fib
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			FIB_COUNTER_INCREMENT(aac_config.FastResponses);
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			__le32 *pstatus = (__le32 *)hwfib->data;
			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	    call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;
			dprintk((KERN_INFO "event_wait up\n"));
			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				up(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);

			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);

			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}

		}
		return 0;
	}
}