Linux Kernel  3.7.1
ixp4xx_qmgr.c
/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <mach/qmgr.h>

static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif

void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	if (queue < HALF_QUEUES) {
		u32 __iomem *reg;
		int bit;
		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
			     reg);
	} else
		/* IRQ source for queues 32-63 is fixed */
		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 en_bitmap, src, stat;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

	en_bitmap = qmgr_regs->irqen[0];
	while (en_bitmap) {
		i = __fls(en_bitmap); /* number of the last "low" queue */
		en_bitmap &= ~BIT(i);
		src = qmgr_regs->irqsrc[i >> 3];
		stat = qmgr_regs->stat1[i >> 3];
		if (src & 4) /* the IRQ condition is inverted */
			stat = ~stat;
		if (stat & BIT(src & 3)) {
			irq_handlers[i](irq_pdevs[i]);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}


static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 req_bitmap;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

	req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last "high" queue */
		req_bitmap &= ~BIT(i);
		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
		ret = IRQ_HANDLED;
	}
	return ret;
}

static irqreturn_t qmgr_irq(int irq, void *pdev)
{
	int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

	if (!req_bitmap)
		return 0;
	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last queue */
		req_bitmap &= ~BIT(i);
		i += half * HALF_QUEUES;
		irq_handlers[i](irq_pdevs[i]);
	}
	return IRQ_HANDLED;
}

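/*
 * Note on the three handlers above: qmgr_irq() is the common case; it reads
 * and acknowledges the pending bits in IRQSTAT and calls the registered
 * handler for each pending queue.  qmgr_irq1_a0()/qmgr_irq2_a0() are only
 * installed by qmgr_init() when cpu_is_ixp42x_rev_a0() is true; as the
 * "don't rely on it" comments suggest, the status register cannot be
 * trusted on that stepping, so these variants instead rescan the
 * enable/source/status registers (low queues) or the "not nearly empty"
 * status (high queues) to find the queues that need servicing.
 */
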
void qmgr_enable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
		     &qmgr_regs->irqen[half]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

void qmgr_disable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
		     &qmgr_regs->irqen[half]);
	__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

static inline void shift_mask(u32 *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

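/*
 * shift_mask() treats mask[0..3] as a single 128-bit bitmap, one bit per
 * 16-dword SRAM page, and shifts it left by one page.  The allocator in
 * qmgr_request_queue() below uses it to slide a queue's footprint through
 * used_sram_bitmap[] until an unused region is found.
 */
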
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char *name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	switch (len) {
	case 16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case 32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case 64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16;		/* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}

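/*
 * For reference (derived from the code above and from qmgr_release_queue()
 * below): the per-queue configuration word written to qmgr_regs->sram[queue]
 * packs the queue's SRAM base address (in 16-dword pages) at bits 14-21, the
 * buffer size code (16/32/64/128 dwords -> 0..3) at bits 24-25, the
 * nearly-empty watermark at bits 26-28 and the nearly-full watermark at
 * bits 29-31.
 */
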
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;

	BUG_ON(!addr);		/* not requested */

	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif

	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);
}

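/*
 * Usage sketch (illustrative only, not part of this file): a client such as
 * the IXP4xx Ethernet or HSS driver pairs the calls above roughly as shown
 * here.  The queue number, length, "dev" pointer and rx_ready() handler are
 * hypothetical placeholders; QUEUE_IRQ_SRC_NOT_EMPTY comes from mach/qmgr.h.
 *
 *	err = qmgr_request_queue(queue, 128, 0, 0, "%s:RX", dev->name);
 *	if (err)
 *		return err;
 *	qmgr_set_irq(queue, QUEUE_IRQ_SRC_NOT_EMPTY, rx_ready, dev);
 *	qmgr_enable_irq(queue);
 *	...
 *	qmgr_disable_irq(queue);
 *	qmgr_release_queue(queue);
 *
 * When DEBUG_QMGR is not set, mach/qmgr.h is expected to turn
 * qmgr_request_queue() into a macro that drops the last two (description)
 * arguments and calls __qmgr_request_queue() instead.
 */
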
static int qmgr_init(void)
{
	int i, err;
	irq_handler_t handler1, handler2;

	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
				     IXP4XX_QMGR_REGION_SIZE,
				     "IXP4xx Queue Manager");
	if (mem_res == NULL)
		return -EBUSY;

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);

	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	if (cpu_is_ixp42x_rev_a0()) {
		handler1 = qmgr_irq1_a0;
		handler2 = qmgr_irq2_a0;
	} else
		handler1 = handler2 = qmgr_irq;

	err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM1, err);
		goto error_irq;
	}

	err = request_irq(IRQ_IXP4XX_QM2, handler2, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM2, err);
		goto error_irq2;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
	return 0;

error_irq2:
	free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	return err;
}

static void qmgr_remove(void)
{
	free_irq(IRQ_IXP4XX_QM1, NULL);
	free_irq(IRQ_IXP4XX_QM2, NULL);
	synchronize_irq(IRQ_IXP4XX_QM1);
	synchronize_irq(IRQ_IXP4XX_QM2);
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}

module_init(qmgr_init);
module_exit(qmgr_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);