Linux Kernel 3.7.1
exec-osm.c
1 /*
2  * Executive OSM
3  *
4  * Copyright (C) 1999-2002 Red Hat Software
5  *
6  * Written by Alan Cox, Building Number Three Ltd
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License as published by the
10  * Free Software Foundation; either version 2 of the License, or (at your
11  * option) any later version.
12  *
13  * A lot of the I2O message side code from this is taken from the Red
14  * Creek RCPCI45 adapter driver by Red Creek Communications
15  *
16  * Fixes/additions:
17  * Philipp Rumpf
18  * Juha Sievänen <[email protected]>
19  * Auvo Häkkinen <[email protected]>
20  * Deepak Saxena <[email protected]>
21  * Boji T Kannanthanam <[email protected]>
22  * Alan Cox <[email protected]>:
23  * Ported to Linux 2.5.
24  * Markus Lidel <[email protected]>:
25  * Minor fixes for 2.6.
26  * Markus Lidel <[email protected]>:
27  * Support for sysfs included.
28  */
29 
30 #include <linux/module.h>
31 #include <linux/i2o.h>
32 #include <linux/delay.h>
33 #include <linux/workqueue.h>
34 #include <linux/string.h>
35 #include <linux/slab.h>
36 #include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
37 #include <asm/param.h> /* HZ */
38 #include "core.h"
39 
40 #define OSM_NAME "exec-osm"
41 
42 struct i2o_driver i2o_exec_driver;
43 
44 /* global wait list for POST WAIT */
45 static LIST_HEAD(i2o_exec_wait_list);
46 
47 /* Wait struct needed for POST WAIT */
48 struct i2o_exec_wait {
49  wait_queue_head_t *wq; /* Pointer to Wait queue */
50  struct i2o_dma dma; /* DMA buffers to free on failure */
51  u32 tcntxt; /* transaction context from reply */
52  int complete; /* 1 if reply received otherwise 0 */
53  u32 m; /* message id */
54  struct i2o_message *msg; /* pointer to the reply message */
55  struct list_head list; /* node in global wait list */
56  spinlock_t lock; /* lock before modifying */
57 };
58 
59 /* Work struct needed to handle LCT NOTIFY replies */
60 struct i2o_exec_lct_notify_work {
61  struct work_struct work; /* work struct */
62  struct i2o_controller *c; /* controller on which the LCT NOTIFY
63  was received */
64 };
65 
66 /* Exec OSM class handling definition */
67 static struct i2o_class_id i2o_exec_class_id[] = {
68  {I2O_CLASS_EXECUTIVE},
69  {I2O_CLASS_END}
70 };
71 
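/*
 * i2o_exec_wait_alloc(): allocate and initialize an i2o_exec_wait struct.
 * Returns a pointer to the new wait struct, or NULL on allocation failure.
 */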
80 static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
81 {
82  struct i2o_exec_wait *wait;
83 
84  wait = kzalloc(sizeof(*wait), GFP_KERNEL);
85  if (!wait)
86  return NULL;
87 
88  INIT_LIST_HEAD(&wait->list);
89  spin_lock_init(&wait->lock);
90 
91  return wait;
92 };
93 
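/* i2o_exec_wait_free(): free an i2o_exec_wait struct allocated above. */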
98 static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
99 {
100  kfree(wait);
101 };
102 
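/*
 * i2o_msg_post_wait_mem(): post a message to controller @c and sleep until the
 * reply arrives or @timeout seconds have elapsed.  On success the reply status
 * (body[0] >> 24) is returned; on timeout -ETIMEDOUT is returned and, if @dma
 * was given, dma->virt is cleared so the caller does not free a buffer the IOP
 * may still write to.
 */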
122 int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
123  unsigned long timeout, struct i2o_dma *dma)
124 {
125  DECLARE_WAIT_QUEUE_HEAD(wq);
126  struct i2o_exec_wait *wait;
127  static u32 tcntxt = 0x80000000;
128  unsigned long flags;
129  int rc = 0;
130 
131  wait = i2o_exec_wait_alloc();
132  if (!wait) {
133  i2o_msg_nop(c, msg);
134  return -ENOMEM;
135  }
136 
137  if (tcntxt == 0xffffffff)
138  tcntxt = 0x80000000;
139 
140  if (dma)
141  wait->dma = *dma;
142 
143  /*
144  * Fill in the message initiator context and transaction context.
145  * We only use transaction contexts >= 0x80000000 for POST WAIT, so
146  * that a POST WAIT reply can be identified easily in the reply handler.
147  */
148  msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
149  wait->tcntxt = tcntxt++;
150  msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
151 
152  wait->wq = &wq;
153  /*
154  * we add elements at the head, because if an entry in the list is
155  * never removed, we would otherwise have to iterate over it every time
156  */
157  list_add(&wait->list, &i2o_exec_wait_list);
158 
159  /*
160  * Post the message to the controller. At some point later it will
161  * return. If we time out before it returns then complete will be zero.
162  */
163  i2o_msg_post(c, msg);
164 
165  wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);
166 
167  spin_lock_irqsave(&wait->lock, flags);
168 
169  wait->wq = NULL;
170 
171  if (wait->complete)
172  rc = le32_to_cpu(wait->msg->body[0]) >> 24;
173  else {
174  /*
175  * We cannot free the wait struct now. This is important: when the
176  * reply does arrive (which it must, unless the controller has
177  * died...), the reply handler would otherwise scribble on freed memory.
178  *
179  * FIXME: try abort message
180  */
181  if (dma)
182  dma->virt = NULL;
183 
184  rc = -ETIMEDOUT;
185  }
186 
187  spin_unlock_irqrestore(&wait->lock, flags);
188 
189  if (rc != -ETIMEDOUT) {
190  i2o_flush_reply(c, wait->m);
191  i2o_exec_wait_free(wait);
192  }
193 
194  return rc;
195 };
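/*
 * Callers that have no DMA buffer to hand over normally reach this function
 * through the i2o_msg_post_wait() helper.  A minimal sketch of that wrapper,
 * roughly as it appears in include/linux/i2o.h, is shown here for reference:
 *
 *	static inline int i2o_msg_post_wait(struct i2o_controller *c,
 *					    struct i2o_message *msg,
 *					    unsigned long timeout)
 *	{
 *		return i2o_msg_post_wait_mem(c, msg, timeout, NULL);
 *	}
 */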
196 
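/*
 * i2o_msg_post_wait_complete(): called from i2o_exec_reply() for POST WAIT
 * replies.  Looks up the wait struct by transaction context, stores the reply
 * and wakes the sleeping caller.  Returns 0 if a caller was woken (the caller
 * flushes the reply itself), and a non-zero value if the wait has already
 * timed out or the context is unknown, in which case the resources are freed
 * here.
 */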
216 static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
217  struct i2o_message *msg, u32 context)
218 {
219  struct i2o_exec_wait *wait, *tmp;
220  unsigned long flags;
221  int rc = 1;
222 
223  /*
224  * We need to search through the i2o_exec_wait_list to see if the given
225  * message is still outstanding. If not, it means that the IOP took
226  * longer to respond to the message than we had allowed and the timer has
227  * already expired. Not much we can do about that except log it for
228  * debug purposes, increase timeout, and recompile.
229  */
230  list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
231  if (wait->tcntxt == context) {
232  spin_lock_irqsave(&wait->lock, flags);
233 
234  list_del(&wait->list);
235 
236  wait->m = m;
237  wait->msg = msg;
238  wait->complete = 1;
239 
240  if (wait->wq)
241  rc = 0;
242  else
243  rc = -1;
244 
245  spin_unlock_irqrestore(&wait->lock, flags);
246 
247  if (rc) {
248  struct device *dev;
249 
250  dev = &c->pdev->dev;
251 
252  pr_debug("%s: timed-out reply received!\n",
253  c->name);
254  i2o_dma_free(dev, &wait->dma);
255  i2o_exec_wait_free(wait);
256  } else
257  wake_up_interruptible(wait->wq);
258 
259  return rc;
260  }
261  }
262 
263  osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
264  context);
265 
266  return -1;
267 };
268 
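/* sysfs 'vendor_id' attribute: reads field 0 of parameter group 0x0000. */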
277 static ssize_t i2o_exec_show_vendor_id(struct device *d,
278  struct device_attribute *attr, char *buf)
279 {
280  struct i2o_device *dev = to_i2o_device(d);
281  u16 id;
282 
283  if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
284  sprintf(buf, "0x%04x", le16_to_cpu(id));
285  return strlen(buf) + 1;
286  }
287 
288  return 0;
289 };
290 
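/* sysfs 'product_id' attribute: reads field 1 of parameter group 0x0000. */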
299 static ssize_t i2o_exec_show_product_id(struct device *d,
300  struct device_attribute *attr,
301  char *buf)
302 {
303  struct i2o_device *dev = to_i2o_device(d);
304  u16 id;
305 
306  if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
307  sprintf(buf, "0x%04x", le16_to_cpu(id));
308  return strlen(buf) + 1;
309  }
310 
311  return 0;
312 };
313 
314 /* Exec-OSM device attributes */
315 static DEVICE_ATTR(vendor_id, S_IRUGO, i2o_exec_show_vendor_id, NULL);
316 static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
317 
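/*
 * i2o_exec_probe(): called when an Executive class device is found.  Registers
 * for all events from the device, creates the vendor_id/product_id sysfs
 * attributes and remembers the device as the controller's Executive device.
 */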
327 static int i2o_exec_probe(struct device *dev)
328 {
329  struct i2o_device *i2o_dev = to_i2o_device(dev);
330  int rc;
331 
332  rc = i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
333  if (rc) goto err_out;
334 
335  rc = device_create_file(dev, &dev_attr_vendor_id);
336  if (rc) goto err_evtreg;
337  rc = device_create_file(dev, &dev_attr_product_id);
338  if (rc) goto err_vid;
339 
340  i2o_dev->iop->exec = i2o_dev;
341 
342  return 0;
343 
344 err_vid:
345  device_remove_file(dev, &dev_attr_vendor_id);
346 err_evtreg:
347  i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
348 err_out:
349  return rc;
350 };
351 
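/*
 * i2o_exec_remove(): counterpart of i2o_exec_probe().  Removes the sysfs
 * attributes and unregisters the event notification.
 */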
360 static int i2o_exec_remove(struct device *dev)
361 {
362  device_remove_file(dev, &dev_attr_product_id);
363  device_remove_file(dev, &dev_attr_vendor_id);
364 
365  i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
366 
367  return 0;
368 };
369 
370 #ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
371 
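/*
 * i2o_exec_lct_notify(): ask the IOP to post a new LCT into c->dlct as soon as
 * its change indicator differs from @change_ind.  The DMA buffer is first
 * resized to the size announced in the status block.  Returns 0 on success.
 */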
381 static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
382 {
383  i2o_status_block *sb = c->status_block.virt;
384  struct device *dev;
385  struct i2o_message *msg;
386 
387  mutex_lock(&c->lct_lock);
388 
389  dev = &c->pdev->dev;
390 
391  if (i2o_dma_realloc(dev, &c->dlct,
392  le32_to_cpu(sb->expected_lct_size))) {
393  mutex_unlock(&c->lct_lock);
394  return -ENOMEM;
395  }
396 
397  msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
398  if (IS_ERR(msg)) {
399  mutex_unlock(&c->lct_lock);
400  return PTR_ERR(msg);
401  }
402 
403  msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_6);
404  msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
405  ADAPTER_TID);
406  msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
407  msg->u.s.tcntxt = cpu_to_le32(0x00000000);
408  msg->body[0] = cpu_to_le32(0xffffffff);
409  msg->body[1] = cpu_to_le32(change_ind);
410  msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
411  msg->body[3] = cpu_to_le32(c->dlct.phys);
412 
413  i2o_msg_post(c, msg);
414 
415  mutex_unlock(&c->lct_lock);
416 
417  return 0;
418 }
419 #endif
420 
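/*
 * i2o_exec_lct_modified(): work handler run when an LCT NOTIFY reply arrives.
 * Parses the new LCT and, if built with CONFIG_I2O_LCT_NOTIFY_ON_CHANGES,
 * re-arms the notification with the next change indicator.
 */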
429 static void i2o_exec_lct_modified(struct work_struct *_work)
430 {
431  struct i2o_exec_lct_notify_work *work =
432  container_of(_work, struct i2o_exec_lct_notify_work, work);
433  u32 change_ind = 0;
434  struct i2o_controller *c = work->c;
435 
436  kfree(work);
437 
438  if (i2o_device_parse_lct(c) != -EAGAIN)
439  change_ind = c->lct->change_ind + 1;
440 
441 #ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
442  i2o_exec_lct_notify(c, change_ind);
443 #endif
444 };
445 
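/*
 * i2o_exec_reply(): reply handler of the Exec OSM.  Failed messages are
 * reported and their preserved MFA released; POST WAIT replies (transaction
 * context bit 31 set) are handed to i2o_msg_post_wait_complete(); LCT NOTIFY
 * replies are deferred to a work item.  Anything else is dumped to the log.
 */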
460 static int i2o_exec_reply(struct i2o_controller *c, u32 m,
461  struct i2o_message *msg)
462 {
463  u32 context;
464 
465  if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
466  struct i2o_message __iomem *pmsg;
467  u32 pm;
468 
469  /*
470  * If Fail bit is set we must take the transaction context of
471  * the preserved message to find the right request again.
472  */
473 
474  pm = le32_to_cpu(msg->body[3]);
475  pmsg = i2o_msg_in_to_virt(c, pm);
476  context = readl(&pmsg->u.s.tcntxt);
477 
478  i2o_report_status(KERN_INFO, "i2o_core", msg);
479 
480  /* Release the preserved msg */
481  i2o_msg_nop_mfa(c, pm);
482  } else
483  context = le32_to_cpu(msg->u.s.tcntxt);
484 
485  if (context & 0x80000000)
486  return i2o_msg_post_wait_complete(c, m, msg, context);
487 
488  if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) {
489  struct i2o_exec_lct_notify_work *work;
490 
491  pr_debug("%s: LCT notify received\n", c->name);
492 
493  work = kmalloc(sizeof(*work), GFP_ATOMIC);
494  if (!work)
495  return -ENOMEM;
496 
497  work->c = c;
498 
499  INIT_WORK(&work->work, i2o_exec_lct_modified);
500  queue_work(i2o_exec_driver.event_queue, &work->work);
501  return 1;
502  }
503 
504  /*
505  * If this happens, we want to dump the message to the syslog so
506  * it can be sent back to the card manufacturer by the end user
507  * to aid in debugging.
508  *
509  */
510  printk(KERN_WARNING "%s: Unsolicited message reply sent to core! "
511  "Message dumped to syslog\n", c->name);
512  i2o_dump_message(msg);
513 
514  return -EFAULT;
515 }
516 
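/* i2o_exec_event(): work handler for event notifications; logs and frees the event. */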
524 static void i2o_exec_event(struct work_struct *work)
525 {
526  struct i2o_event *evt = container_of(work, struct i2o_event, work);
527 
528  if (likely(evt->i2o_dev))
529  osm_debug("Event received from device: %d\n",
530  evt->i2o_dev->lct_data.tid);
531  kfree(evt);
532 };
533 
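/*
 * i2o_exec_lct_get(): synchronously request the LCT from the IOP and parse it,
 * retrying up to I2O_LCT_GET_TRIES times.  Returns 0 on success or a negative
 * error code on failure.
 */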
544 int i2o_exec_lct_get(struct i2o_controller *c)
545 {
546  struct i2o_message *msg;
547  int i = 0;
548  int rc = -EAGAIN;
549 
550  for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
551  msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
552  if (IS_ERR(msg))
553  return PTR_ERR(msg);
554 
555  msg->u.head[0] =
556  cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_6);
557  msg->u.head[1] =
558  cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
559  ADAPTER_TID);
560  msg->body[0] = cpu_to_le32(0xffffffff);
561  msg->body[1] = cpu_to_le32(0x00000000);
562  msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
563  msg->body[3] = cpu_to_le32(c->dlct.phys);
564 
565  rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
566  if (rc < 0)
567  break;
568 
569  rc = i2o_device_parse_lct(c);
570  if (rc != -EAGAIN)
571  break;
572  }
573 
574  return rc;
575 }
576 
577 /* Exec OSM driver struct */
578 struct i2o_driver i2o_exec_driver = {
579  .name = OSM_NAME,
580  .reply = i2o_exec_reply,
581  .event = i2o_exec_event,
582  .classes = i2o_exec_class_id,
583  .driver = {
584  .probe = i2o_exec_probe,
585  .remove = i2o_exec_remove,
586  },
587 };
588 
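/* i2o_exec_init(): register the Exec OSM with the I2O core. */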
596 int __init i2o_exec_init(void)
597 {
598  return i2o_driver_register(&i2o_exec_driver);
599 };
600 
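/* i2o_exec_exit(): unregister the Exec OSM from the I2O core. */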
606 void i2o_exec_exit(void)
607 {
608  i2o_driver_unregister(&i2o_exec_driver);
609 };
610