Linux Kernel 3.7.1
ctcm_fsms.c
1 /*
2  * Copyright IBM Corp. 2001, 2007
3  * Authors: Fritz Elfert ([email protected])
4  * Peter Tiedemann ([email protected])
5  * MPC additions :
6  * Belinda Thompson ([email protected])
7  * Andy Richter ([email protected])
8  */
9 
10 #undef DEBUG
11 #undef DEBUGDATA
12 #undef DEBUGCCW
13 
14 #define KMSG_COMPONENT "ctcm"
15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/slab.h>
21 #include <linux/errno.h>
22 #include <linux/types.h>
23 #include <linux/interrupt.h>
24 #include <linux/timer.h>
25 #include <linux/bitops.h>
26 
27 #include <linux/signal.h>
28 #include <linux/string.h>
29 
30 #include <linux/ip.h>
31 #include <linux/if_arp.h>
32 #include <linux/tcp.h>
33 #include <linux/skbuff.h>
34 #include <linux/ctype.h>
35 #include <net/dst.h>
36 
37 #include <linux/io.h>
38 #include <asm/ccwdev.h>
39 #include <asm/ccwgroup.h>
40 #include <linux/uaccess.h>
41 
42 #include <asm/idals.h>
43 
44 #include "fsm.h"
45 
46 #include "ctcm_dbug.h"
47 #include "ctcm_main.h"
48 #include "ctcm_fsms.h"
49 
50 const char *dev_state_names[] = {
51  [DEV_STATE_STOPPED] = "Stopped",
52  [DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
53  [DEV_STATE_STARTWAIT_RX] = "StartWait RX",
54  [DEV_STATE_STARTWAIT_TX] = "StartWait TX",
55  [DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
56  [DEV_STATE_STOPWAIT_RX] = "StopWait RX",
57  [DEV_STATE_STOPWAIT_TX] = "StopWait TX",
58  [DEV_STATE_RUNNING] = "Running",
59 };
60 
61 const char *dev_event_names[] = {
62  [DEV_EVENT_START] = "Start",
63  [DEV_EVENT_STOP] = "Stop",
64  [DEV_EVENT_RXUP] = "RX up",
65  [DEV_EVENT_TXUP] = "TX up",
66  [DEV_EVENT_RXDOWN] = "RX down",
67  [DEV_EVENT_TXDOWN] = "TX down",
68  [DEV_EVENT_RESTART] = "Restart",
69 };
70 
71 const char *ctc_ch_event_names[] = {
72  [CTC_EVENT_IO_SUCCESS] = "ccw_device success",
73  [CTC_EVENT_IO_EBUSY] = "ccw_device busy",
74  [CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
75  [CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
76  [CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
77  [CTC_EVENT_ATTN] = "Status ATTN",
78  [CTC_EVENT_BUSY] = "Status BUSY",
79  [CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
80  [CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
81  [CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
82  [CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
83  [CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
84  [CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
85  [CTC_EVENT_UC_ZERO] = "Unit check ZERO",
86  [CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
87  [CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
88  [CTC_EVENT_MC_FAIL] = "Machine check failure",
89  [CTC_EVENT_MC_GOOD] = "Machine check operational",
90  [CTC_EVENT_IRQ] = "IRQ normal",
91  [CTC_EVENT_FINSTAT] = "IRQ final",
92  [CTC_EVENT_TIMER] = "Timer",
93  [CTC_EVENT_START] = "Start",
94  [CTC_EVENT_STOP] = "Stop",
95  /*
96  * additional MPC events
97  */
98  [CTC_EVENT_SEND_XID] = "XID Exchange",
99  [CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
100 };
101 
102 const char *ctc_ch_state_names[] = {
103  [CTC_STATE_IDLE] = "Idle",
104  [CTC_STATE_STOPPED] = "Stopped",
105  [CTC_STATE_STARTWAIT] = "StartWait",
106  [CTC_STATE_STARTRETRY] = "StartRetry",
107  [CTC_STATE_SETUPWAIT] = "SetupWait",
108  [CTC_STATE_RXINIT] = "RX init",
109  [CTC_STATE_TXINIT] = "TX init",
110  [CTC_STATE_RX] = "RX",
111  [CTC_STATE_TX] = "TX",
112  [CTC_STATE_RXIDLE] = "RX idle",
113  [CTC_STATE_TXIDLE] = "TX idle",
114  [CTC_STATE_RXERR] = "RX error",
115  [CTC_STATE_TXERR] = "TX error",
116  [CTC_STATE_TERM] = "Terminating",
117  [CTC_STATE_DTERM] = "Restarting",
118  [CTC_STATE_NOTOP] = "Not operational",
119  /*
120  * additional MPC states
121  */
122  [CH_XID0_PENDING] = "Pending XID0 Start",
123  [CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
124  [CH_XID7_PENDING] = "Pending XID7 P1 Start",
125  [CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
126  [CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
127  [CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
128  [CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
129 };
130 
131 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
132 
133 /*
134  * ----- static ctcm actions for channel statemachine -----
135  *
136 */
137 static void chx_txdone(fsm_instance *fi, int event, void *arg);
138 static void chx_rx(fsm_instance *fi, int event, void *arg);
139 static void chx_rxidle(fsm_instance *fi, int event, void *arg);
140 static void chx_firstio(fsm_instance *fi, int event, void *arg);
141 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
142 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
143 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
144 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
145 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
146 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
147 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
148 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
149 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
150 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
151 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
152 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
153 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
154 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
155 
156 /*
157  * ----- static ctcmpc actions for ctcmpc channel statemachine -----
158  *
159 */
160 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
161 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
162 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
163 /* shared :
164 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
165 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
166 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
167 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
168 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
169 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
170 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
171 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
172 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
173 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
174 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
175 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
176 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
177 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
178 */
179 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
180 static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
181 static void ctcmpc_chx_resend(fsm_instance *, int, void *);
182 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
183 
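/*
 * Report a failed ccw_device call: log the return code and feed the
 * matching I/O event (CTC_EVENT_IO_EBUSY, _ENODEV or _UNKNOWN) into
 * the channel statemachine.  ch is the affected channel, rc the
 * return code and msg a short tag naming the failed operation.
 */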
190 void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
191 {
192  CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
193  "%s(%s): %s: %04x\n",
194  CTCM_FUNTAIL, ch->id, msg, rc);
195  switch (rc) {
196  case -EBUSY:
197  pr_info("%s: The communication peer is busy\n",
198  ch->id);
199  fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
200  break;
201  case -ENODEV:
202  pr_err("%s: The specified target device is not valid\n",
203  ch->id);
204  fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
205  break;
206  default:
207  pr_err("An I/O operation resulted in error %04x\n",
208  rc);
209  fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
210  }
211 }
212 
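/*
 * Drop every sk_buff still queued on q, releasing the reference that
 * was taken when it was queued.
 */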
213 void ctcm_purge_skb_queue(struct sk_buff_head *q)
214 {
215  struct sk_buff *skb;
216 
217  CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
218 
219  while ((skb = skb_dequeue(q))) {
220  atomic_dec(&skb->users);
221  dev_kfree_skb_any(skb);
222  }
223 }
224 
228 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
229 {
230 }
231 
232 /*
233  * Actions for channel - statemachines.
234  */
235 
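/*
 * Action called when transmission of a packet has completed (non-MPC):
 * update the TX statistics, free the sent skbs and, if frames piled up
 * on the collect queue in the meantime, chain them into trans_skb and
 * start the next write; otherwise return to TXIDLE.
 */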
245 static void chx_txdone(fsm_instance *fi, int event, void *arg)
246 {
247  struct channel *ch = arg;
248  struct net_device *dev = ch->netdev;
249  struct ctcm_priv *priv = dev->ml_priv;
250  struct sk_buff *skb;
251  int first = 1;
252  int i;
253  unsigned long duration;
254  struct timespec done_stamp = current_kernel_time(); /* xtime */
255 
256  CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
257 
258  duration =
259  (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
260  (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
261  if (duration > ch->prof.tx_time)
262  ch->prof.tx_time = duration;
263 
264  if (ch->irb->scsw.cmd.count != 0)
265  CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
266  "%s(%s): TX not complete, remaining %d bytes",
267  CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
268  fsm_deltimer(&ch->timer);
269  while ((skb = skb_dequeue(&ch->io_queue))) {
270  priv->stats.tx_packets++;
271  priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
272  if (first) {
273  priv->stats.tx_bytes += 2;
274  first = 0;
275  }
276  atomic_dec(&skb->users);
277  dev_kfree_skb_irq(skb);
278  }
279  spin_lock(&ch->collect_lock);
280  clear_normalized_cda(&ch->ccw[4]);
281  if (ch->collect_len > 0) {
282  int rc;
283 
284  if (ctcm_checkalloc_buffer(ch)) {
285  spin_unlock(&ch->collect_lock);
286  return;
287  }
288  ch->trans_skb->data = ch->trans_skb_data;
289  skb_reset_tail_pointer(ch->trans_skb);
290  ch->trans_skb->len = 0;
291  if (ch->prof.maxmulti < (ch->collect_len + 2))
292  ch->prof.maxmulti = ch->collect_len + 2;
293  if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
294  ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
295  *((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
296  i = 0;
297  while ((skb = skb_dequeue(&ch->collect_queue))) {
298  skb_copy_from_linear_data(skb,
299  skb_put(ch->trans_skb, skb->len), skb->len);
300  priv->stats.tx_packets++;
301  priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
302  atomic_dec(&skb->users);
303  dev_kfree_skb_irq(skb);
304  i++;
305  }
306  ch->collect_len = 0;
307  spin_unlock(&ch->collect_lock);
308  ch->ccw[1].count = ch->trans_skb->len;
309  fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
310  ch->prof.send_stamp = current_kernel_time(); /* xtime */
311  rc = ccw_device_start(ch->cdev, &ch->ccw[0],
312  (unsigned long)ch, 0xff, 0);
313  ch->prof.doios_multi++;
314  if (rc != 0) {
315  priv->stats.tx_dropped += i;
316  priv->stats.tx_errors += i;
317  fsm_deltimer(&ch->timer);
318  ctcm_ccw_check_rc(ch, rc, "chained TX");
319  }
320  } else {
321  spin_unlock(&ch->collect_lock);
322  fsm_newstate(fi, CTC_STATE_TXIDLE);
323  }
324  ctcm_clear_busy_do(dev);
325 }
326 
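/*
 * Initial transmit setup has completed: enter TXIDLE and report
 * "TX up" to the device statemachine.
 */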
336 void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
337 {
338  struct channel *ch = arg;
339  struct net_device *dev = ch->netdev;
340  struct ctcm_priv *priv = dev->ml_priv;
341 
342  CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
343 
344  fsm_deltimer(&ch->timer);
345  fsm_newstate(fi, CTC_STATE_TXIDLE);
346  fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
347 }
348 
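/*
 * Action called when data has been received (non-MPC): check the block
 * length in the first two bytes against the residual CCW count
 * (tolerating the two trailing pad bytes some VM TCP stacks append),
 * unpack the block towards the network layer and restart the read.
 */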
357 static void chx_rx(fsm_instance *fi, int event, void *arg)
358 {
359  struct channel *ch = arg;
360  struct net_device *dev = ch->netdev;
361  struct ctcm_priv *priv = dev->ml_priv;
362  int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
363  struct sk_buff *skb = ch->trans_skb;
364  __u16 block_len = *((__u16 *)skb->data);
365  int check_len;
366  int rc;
367 
368  fsm_deltimer(&ch->timer);
369  if (len < 8) {
370  CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
371  "%s(%s): got packet with length %d < 8\n",
372  CTCM_FUNTAIL, dev->name, len);
373  priv->stats.rx_dropped++;
374  priv->stats.rx_length_errors++;
375  goto again;
376  }
377  if (len > ch->max_bufsize) {
378  CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
379  "%s(%s): got packet with length %d > %d\n",
380  CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
381  priv->stats.rx_dropped++;
382  priv->stats.rx_length_errors++;
383  goto again;
384  }
385 
386  /*
387  * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
388  */
389  switch (ch->protocol) {
390  case CTCM_PROTO_S390:
391  case CTCM_PROTO_OS390:
392  check_len = block_len + 2;
393  break;
394  default:
395  check_len = block_len;
396  break;
397  }
398  if ((len < block_len) || (len > check_len)) {
399  CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
400  "%s(%s): got block length %d != rx length %d\n",
401  CTCM_FUNTAIL, dev->name, block_len, len);
402  if (do_debug)
403  ctcmpc_dump_skb(skb, 0);
404 
405  *((__u16 *)skb->data) = len;
406  priv->stats.rx_dropped++;
407  priv->stats.rx_length_errors++;
408  goto again;
409  }
410  if (block_len > 2) {
411  *((__u16 *)skb->data) = block_len - 2;
412  ctcm_unpack_skb(ch, skb);
413  }
414  again:
415  skb->data = ch->trans_skb_data;
416  skb_reset_tail_pointer(skb);
417  skb->len = 0;
418  if (ctcm_checkalloc_buffer(ch))
419  return;
420  ch->ccw[1].count = ch->max_bufsize;
421  rc = ccw_device_start(ch->cdev, &ch->ccw[0],
422  (unsigned long)ch, 0xff, 0);
423  if (rc != 0)
424  ctcm_ccw_check_rc(ch, rc, "normal RX");
425 }
426 
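/*
 * Start the initial handshake I/O on a channel: transfer the two-byte
 * initial block length, handle the OS/390 and VM compatibility-mode
 * special cases and move the channel towards its RX/TX init state.
 */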
434 static void chx_firstio(fsm_instance *fi, int event, void *arg)
435 {
436  int rc;
437  struct channel *ch = arg;
438  int fsmstate = fsm_getstate(fi);
439 
440  CTCM_DBF_TEXT_(SETUP, CTC_DBF_DEBUG,
441  "%s(%s) : %02x",
442  CTCM_FUNTAIL, ch->id, fsmstate);
443 
444  ch->sense_rc = 0; /* reset unit check report control */
445  if (fsmstate == CTC_STATE_TXIDLE)
446  CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
447  "%s(%s): remote side issued READ?, init.\n",
448  CTCM_FUNTAIL, ch->id);
449  fsm_deltimer(&ch->timer);
450  if (ctcm_checkalloc_buffer(ch))
451  return;
452  if ((fsmstate == CTC_STATE_SETUPWAIT) &&
453  (ch->protocol == CTCM_PROTO_OS390)) {
454  /* OS/390 resp. z/OS */
455  if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
456  *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
457  fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
458  CTC_EVENT_TIMER, ch);
459  chx_rxidle(fi, event, arg);
460  } else {
461  struct net_device *dev = ch->netdev;
462  struct ctcm_priv *priv = dev->ml_priv;
463  fsm_newstate(fi, CTC_STATE_TXIDLE);
464  fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
465  }
466  return;
467  }
468  /*
469  * Don't setup a timer for receiving the initial RX frame
470  * if in compatibility mode, since VM TCP delays the initial
471  * frame until it has some data to send.
472  */
473  if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
474  (ch->protocol != CTCM_PROTO_S390))
475  fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
476 
477  *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
478  ch->ccw[1].count = 2; /* Transfer only length */
479 
480  fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
481  ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
482  rc = ccw_device_start(ch->cdev, &ch->ccw[0],
483  (unsigned long)ch, 0xff, 0);
484  if (rc != 0) {
485  fsm_deltimer(&ch->timer);
486  fsm_newstate(fi, CTC_STATE_SETUPWAIT);
487  ctcm_ccw_check_rc(ch, rc, "init IO");
488  }
489  /*
490  * If in compatibility mode since we don't setup a timer, we
491  * also signal RX channel up immediately. This enables us
492  * to send packets early which in turn usually triggers some
493  * reply from VM TCP which brings up the RX channel to it's
494  * final state.
495  */
496  if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
497  (ch->protocol == CTCM_PROTO_S390)) {
498  struct net_device *dev = ch->netdev;
499  struct ctcm_priv *priv = dev->ml_priv;
500  fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
501  }
502 }
503 
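/*
 * The initial RX transfer has completed: if the peer sent the expected
 * initial block length, start the first real read and report "RX up";
 * otherwise redo the initial handshake via chx_firstio().
 */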
513 static void chx_rxidle(fsm_instance *fi, int event, void *arg)
514 {
515  struct channel *ch = arg;
516  struct net_device *dev = ch->netdev;
517  struct ctcm_priv *priv = dev->ml_priv;
518  __u16 buflen;
519  int rc;
520 
521  fsm_deltimer(&ch->timer);
522  buflen = *((__u16 *)ch->trans_skb->data);
523  CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
524  __func__, dev->name, buflen);
525 
526  if (buflen >= CTCM_INITIAL_BLOCKLEN) {
527  if (ctcm_checkalloc_buffer(ch))
528  return;
529  ch->ccw[1].count = ch->max_bufsize;
530  fsm_newstate(fi, CTC_STATE_RXIDLE);
531  rc = ccw_device_start(ch->cdev, &ch->ccw[0],
532  (unsigned long)ch, 0xff, 0);
533  if (rc != 0) {
534  fsm_newstate(fi, CTC_STATE_RXINIT);
535  ctcm_ccw_check_rc(ch, rc, "initial RX");
536  } else
537  fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
538  } else {
539  CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
540  __func__, dev->name,
541  buflen, CTCM_INITIAL_BLOCKLEN);
542  chx_firstio(fi, event, arg);
543  }
544 }
545 
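/*
 * Issue the prepared mode-setting channel program at ccw[6] under a
 * timeout and wait in SETUPWAIT for its completion; on error fall back
 * to STARTWAIT.
 */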
553 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
554 {
555  struct channel *ch = arg;
556  int rc;
557  unsigned long saveflags = 0;
558  int timeout = CTCM_TIME_5_SEC;
559 
560  fsm_deltimer(&ch->timer);
561  if (IS_MPC(ch)) {
562  timeout = 1500;
563  CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
564  __func__, smp_processor_id(), ch, ch->id);
565  }
566  fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
567  fsm_newstate(fi, CTC_STATE_SETUPWAIT);
568  CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
569 
570  if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
571  spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
572  /* Such conditional locking is undeterministic in
573  * static view. => ignore sparse warnings here. */
574 
575  rc = ccw_device_start(ch->cdev, &ch->ccw[6],
576  (unsigned long)ch, 0xff, 0);
577  if (event == CTC_EVENT_TIMER) /* see above comments */
578  spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
579  if (rc != 0) {
580  fsm_deltimer(&ch->timer);
581  fsm_newstate(fi, CTC_STATE_STARTWAIT);
582  ctcm_ccw_check_rc(ch, rc, "set Mode");
583  } else
584  ch->retry = 0;
585 }
586 
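/*
 * Bring a channel up: rebuild the basic channel program (prepare,
 * read or write, no-op), drop any stale trans_skb, enter STARTWAIT and
 * halt the subchannel so it starts from a defined state.
 */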
594 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
595 {
596  struct channel *ch = arg;
597  unsigned long saveflags;
598  int rc;
599 
600  CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
601  CTCM_FUNTAIL, ch->id,
602  (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
603 
604  if (ch->trans_skb != NULL) {
605  clear_normalized_cda(&ch->ccw[1]);
606  dev_kfree_skb(ch->trans_skb);
607  ch->trans_skb = NULL;
608  }
609  if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
610  ch->ccw[1].cmd_code = CCW_CMD_READ;
611  ch->ccw[1].flags = CCW_FLAG_SLI;
612  ch->ccw[1].count = 0;
613  } else {
614  ch->ccw[1].cmd_code = CCW_CMD_WRITE;
615  ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
616  ch->ccw[1].count = 0;
617  }
618  if (ctcm_checkalloc_buffer(ch)) {
619  CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
620  "%s(%s): %s trans_skb alloc delayed "
621  "until first transfer",
622  CTCM_FUNTAIL, ch->id,
623  (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
624  "RX" : "TX");
625  }
626  ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
627  ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
628  ch->ccw[0].count = 0;
629  ch->ccw[0].cda = 0;
630  ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
631  ch->ccw[2].flags = CCW_FLAG_SLI;
632  ch->ccw[2].count = 0;
633  ch->ccw[2].cda = 0;
634  memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
635  ch->ccw[4].cda = 0;
636  ch->ccw[4].flags &= ~CCW_FLAG_IDA;
637 
638  fsm_newstate(fi, CTC_STATE_STARTWAIT);
639  fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
640  spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
641  rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
642  spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
643  if (rc != 0) {
644  if (rc != -EBUSY)
645  fsm_deltimer(&ch->timer);
646  ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
647  }
648 }
649 
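/*
 * Shut a channel down: stop the timers, enter TERM and halt the
 * running I/O.  The ccw device lock is taken only for CTC_EVENT_STOP,
 * the one caller that does not already hold it.
 */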
657 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
658 {
659  struct channel *ch = arg;
660  unsigned long saveflags = 0;
661  int rc;
662  int oldstate;
663 
664  fsm_deltimer(&ch->timer);
665  if (IS_MPC(ch))
666  fsm_deltimer(&ch->sweep_timer);
667 
668  fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
669 
670  if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */
671  spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
672  /* Such conditional locking is undeterministic in
673  * static view. => ignore sparse warnings here. */
674  oldstate = fsm_getstate(fi);
675  fsm_newstate(fi, CTC_STATE_TERM);
676  rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
677 
678  if (event == CTC_EVENT_STOP)
679  spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
680  /* see remark above about conditional locking */
681 
682  if (rc != 0 && rc != -EBUSY) {
683  fsm_deltimer(&ch->timer);
684  if (event != CTC_EVENT_STOP) {
685  fsm_newstate(fi, oldstate);
686  ctcm_ccw_check_rc(ch, rc, (char *)__func__);
687  }
688  }
689 }
690 
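/*
 * Common cleanup used by the stopped/fail actions: stop the timers,
 * release trans_skb when fully stopped, flush the send queues, reset
 * the MPC sequence numbers and report RX or TX down to the device
 * statemachine.
 */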
699 static void ctcm_chx_cleanup(fsm_instance *fi, int state,
700  struct channel *ch)
701 {
702  struct net_device *dev = ch->netdev;
703  struct ctcm_priv *priv = dev->ml_priv;
704 
705  CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
706  "%s(%s): %s[%d]\n",
707  CTCM_FUNTAIL, dev->name, ch->id, state);
708 
709  fsm_deltimer(&ch->timer);
710  if (IS_MPC(ch))
711  fsm_deltimer(&ch->sweep_timer);
712 
713  fsm_newstate(fi, state);
714  if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
715  clear_normalized_cda(&ch->ccw[1]);
716  dev_kfree_skb_any(ch->trans_skb);
717  ch->trans_skb = NULL;
718  }
719 
720  ch->th_seg = 0x00;
721  ch->th_seq_num = 0x00;
722  if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
723  skb_queue_purge(&ch->io_queue);
724  fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
725  } else {
726  ctcm_purge_skb_queue(&ch->io_queue);
727  if (IS_MPC(ch))
728  ctcm_purge_skb_queue(&ch->sweep_queue);
729  spin_lock(&ch->collect_lock);
730  ctcm_purge_skb_queue(&ch->collect_queue);
731  ch->collect_len = 0;
732  spin_unlock(&ch->collect_lock);
733  fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
734  }
735 }
736 
745 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
746 {
747  ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
748 }
749 
758 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
759 {
760  fsm_newstate(fi, CTC_STATE_STOPPED);
761 }
762 
772 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
773 {
774  ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
775 }
776 
784 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
785 {
786  struct channel *ch = arg;
787  struct net_device *dev = ch->netdev;
788  struct ctcm_priv *priv = dev->ml_priv;
789 
790  /*
791  * Special case: Got UC_RCRESET on setmode.
792  * This means that remote side isn't setup. In this case
793  * simply retry after some 10 secs...
794  */
795  if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
796  ((event == CTC_EVENT_UC_RCRESET) ||
797  (event == CTC_EVENT_UC_RSRESET))) {
798  fsm_newstate(fi, CTC_STATE_STARTRETRY);
799  fsm_deltimer(&ch->timer);
801  if (!IS_MPC(ch) &&
802  (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
803  int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
804  if (rc != 0)
805  ctcm_ccw_check_rc(ch, rc,
806  "HaltIO in chx_setuperr");
807  }
808  return;
809  }
810 
811  CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
812  "%s(%s) : %s error during %s channel setup state=%s\n",
813  CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
814  (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
815  fsm_getstate_str(fi));
816 
817  if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
818  fsm_newstate(fi, CTC_STATE_RXERR);
819  fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
820  } else {
821  fsm_newstate(fi, CTC_STATE_TXERR);
822  fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
823  }
824 }
825 
833 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
834 {
835  struct channel *ch = arg;
836  struct net_device *dev = ch->netdev;
837  unsigned long saveflags = 0;
838  int oldstate;
839  int rc;
840 
841  CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
842  "%s: %s[%d] of %s\n",
843  CTCM_FUNTAIL, ch->id, event, dev->name);
844 
845  fsm_deltimer(&ch->timer);
846 
847  fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
848  oldstate = fsm_getstate(fi);
849  fsm_newstate(fi, CTC_STATE_STARTWAIT);
850  if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
851  spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
852  /* Such conditional locking is a known problem for
853  * sparse because its undeterministic in static view.
854  * Warnings should be ignored here. */
855  rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
856  if (event == CTC_EVENT_TIMER)
857  spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
858  if (rc != 0) {
859  if (rc != -EBUSY) {
860  fsm_deltimer(&ch->timer);
861  fsm_newstate(fi, oldstate);
862  }
863  ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
864  }
865 }
866 
875 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
876 {
877  struct channel *ch = arg;
878  struct net_device *dev = ch->netdev;
879  struct ctcm_priv *priv = dev->ml_priv;
880 
881  if (event == CTC_EVENT_TIMER) {
882  if (!IS_MPCDEV(dev))
883  /* TODO : check if MPC deletes timer somewhere */
884  fsm_deltimer(&ch->timer);
885  if (ch->retry++ < 3)
886  ctcm_chx_restart(fi, event, arg);
887  else {
888  fsm_newstate(fi, CTC_STATE_RXERR);
889  fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
890  }
891  } else {
892  CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
893  "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
894  ctc_ch_event_names[event], fsm_getstate_str(fi));
895 
896  dev_warn(&dev->dev,
897  "Initialization failed with RX/TX init handshake "
898  "error %s\n", ctc_ch_event_names[event]);
899  }
900 }
901 
910 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
911 {
912  struct channel *ch = arg;
913  struct net_device *dev = ch->netdev;
914  struct ctcm_priv *priv = dev->ml_priv;
915 
917  "%s(%s): RX %s busy, init. fail",
918  CTCM_FUNTAIL, dev->name, ch->id);
919  fsm_newstate(fi, CTC_STATE_RXERR);
920  fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
921 }
922 
930 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
931 {
932  struct channel *ch = arg;
933  struct channel *ch2;
934  struct net_device *dev = ch->netdev;
935  struct ctcm_priv *priv = dev->ml_priv;
936 
938  "%s: %s: remote disconnect - re-init ...",
939  CTCM_FUNTAIL, dev->name);
940  fsm_deltimer(&ch->timer);
941  /*
942  * Notify device statemachine
943  */
944  fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
945  fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
946 
947  fsm_newstate(fi, CTC_STATE_DTERM);
948  ch2 = priv->channel[CTCM_WRITE];
949  fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
950 
951  ccw_device_halt(ch->cdev, (unsigned long)ch);
952  ccw_device_halt(ch2->cdev, (unsigned long)ch2);
953 }
954 
962 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
963 {
964  struct channel *ch = arg;
965  struct net_device *dev = ch->netdev;
966  struct ctcm_priv *priv = dev->ml_priv;
967 
968  if (event == CTC_EVENT_TIMER) {
969  fsm_deltimer(&ch->timer);
970  if (ch->retry++ < 3)
971  ctcm_chx_restart(fi, event, arg);
972  else {
973  fsm_newstate(fi, CTC_STATE_TXERR);
974  fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
975  }
976  } else {
977  CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
978  "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
979  ctc_ch_event_names[event], fsm_getstate_str(fi));
980 
981  dev_warn(&dev->dev,
982  "Initialization failed with RX/TX init handshake "
983  "error %s\n", ctc_ch_event_names[event]);
984  }
985 }
986 
994 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
995 {
996  struct channel *ch = arg;
997  struct net_device *dev = ch->netdev;
998  struct ctcm_priv *priv = dev->ml_priv;
999  struct sk_buff *skb;
1000 
1001  CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
1002  __func__, smp_processor_id(), ch, ch->id);
1003 
1004  fsm_deltimer(&ch->timer);
1005  if (ch->retry++ > 3) {
1006  struct mpc_group *gptr = priv->mpcg;
1008  "%s: %s: retries exceeded",
1009  CTCM_FUNTAIL, ch->id);
1010  fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1011  /* call restart if not MPC or if MPC and mpcg fsm is ready.
1012  use gptr as mpc indicator */
1013  if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
1014  ctcm_chx_restart(fi, event, arg);
1015  goto done;
1016  }
1017 
1019  "%s : %s: retry %d",
1020  CTCM_FUNTAIL, ch->id, ch->retry);
1021  skb = skb_peek(&ch->io_queue);
1022  if (skb) {
1023  int rc = 0;
1024  unsigned long saveflags = 0;
1025  clear_normalized_cda(&ch->ccw[4]);
1026  ch->ccw[4].count = skb->len;
1027  if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1028  CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR,
1029  "%s: %s: IDAL alloc failed",
1030  CTCM_FUNTAIL, ch->id);
1031  fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1032  ctcm_chx_restart(fi, event, arg);
1033  goto done;
1034  }
1035  fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
1036  if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
1037  spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1038  /* Such conditional locking is a known problem for
1039  * sparse because its undeterministic in static view.
1040  * Warnings should be ignored here. */
1041  if (do_debug_ccw)
1042  ctcmpc_dumpit((char *)&ch->ccw[3],
1043  sizeof(struct ccw1) * 3);
1044 
1045  rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1046  (unsigned long)ch, 0xff, 0);
1047  if (event == CTC_EVENT_TIMER)
1048  spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1049  saveflags);
1050  if (rc != 0) {
1051  fsm_deltimer(&ch->timer);
1052  ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
1053  ctcm_purge_skb_queue(&ch->io_queue);
1054  }
1055  }
1056 done:
1057  return;
1058 }
1059 
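/*
 * Handle an unrecoverable channel error: mark the channel as RXERR or
 * TXERR according to its direction and notify the device statemachine.
 */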
1067 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
1068 {
1069  struct channel *ch = arg;
1070  struct net_device *dev = ch->netdev;
1071  struct ctcm_priv *priv = dev->ml_priv;
1072  int rd = CHANNEL_DIRECTION(ch->flags);
1073 
1074  fsm_deltimer(&ch->timer);
1076  "%s: %s: %s unrecoverable channel error",
1077  CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
1078 
1079  if (IS_MPC(ch)) {
1080  priv->stats.tx_dropped++;
1081  priv->stats.tx_errors++;
1082  }
1083  if (rd == CTCM_READ) {
1084  fsm_newstate(fi, CTC_STATE_RXERR);
1085  fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
1086  } else {
1087  fsm_newstate(fi, CTC_STATE_TXERR);
1088  fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1089  }
1090 }
1091 
1092 /*
1093  * The ctcm statemachine for a channel.
1094  */
1095 const fsm_node ch_fsm[] = {
1096  { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
1097  { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
1098  { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
1099  { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1100 
1101  { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
1102  { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
1103  { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
1104  { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1105  { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
1106 
1107  { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1108  { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
1109  { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1110  { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
1111  { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1112  { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1113 
1114  { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
1115  { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
1116  { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
1117  { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1118 
1119  { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1120  { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
1121  { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
1122  { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1123  { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1124  { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
1125  { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1126  { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1127 
1128  { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1129  { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
1130  { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
1131  { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
1132  { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
1133  { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
1134  { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
1135  { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1136  { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
1137  { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1138 
1139  { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1140  { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
1141  { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
1142  { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
1143  { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1144  { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1145  { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },
1146 
1147  { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1148  { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
1149  { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
1150  { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
1151  { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
1152  { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
1153  { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1154  { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1155 
1156  { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1157  { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
1158  { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
1159  { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1160  { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1161  { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1162  { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1163 
1164  { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
1165  { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
1166  { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
1167  { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1168  { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1169  { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1170 
1171  { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
1172  { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
1173  { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1174  { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1175  { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1176  { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1177 
1178  { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
1179  { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
1180  { CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
1181  { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
1182  { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
1183  { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
1184  { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1185  { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1186 
1187  { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
1188  { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
1189  { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1190  { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1191 };
1192 
1193 int ch_fsm_len = ARRAY_SIZE(ch_fsm);
1194 
1195 /*
1196  * MPC actions for mpc channel statemachine
1197  * handling of MPC protocol requires extra
1198  * statemachine and actions which are prefixed ctcmpc_ .
1199  * The ctc_ch_states and ctc_ch_state_names,
1200  * ctc_ch_events and ctc_ch_event_names share the ctcm definitions
1201  * which are expanded by some elements.
1202  */
1203 
1204 /*
1205  * Actions for mpc channel statemachine.
1206  */
1207 
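/*
 * MPC variant of the transmit-completion action: account the sent
 * skbs and, unless a sweep is in progress, build a new trans_skb from
 * the collect queue (PDU flags, TH header, sequence number) and start
 * the next write.
 */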
1217 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1218 {
1219  struct channel *ch = arg;
1220  struct net_device *dev = ch->netdev;
1221  struct ctcm_priv *priv = dev->ml_priv;
1222  struct mpc_group *grp = priv->mpcg;
1223  struct sk_buff *skb;
1224  int first = 1;
1225  int i;
1226  __u32 data_space;
1227  unsigned long duration;
1228  struct sk_buff *peekskb;
1229  int rc;
1230  struct th_header *header;
1231  struct pdu *p_header;
1232  struct timespec done_stamp = current_kernel_time(); /* xtime */
1233 
1234  CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
1235  __func__, dev->name, smp_processor_id());
1236 
1237  duration =
1238  (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
1239  (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
1240  if (duration > ch->prof.tx_time)
1241  ch->prof.tx_time = duration;
1242 
1243  if (ch->irb->scsw.cmd.count != 0)
1244  CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
1245  "%s(%s): TX not complete, remaining %d bytes",
1246  CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
1247  fsm_deltimer(&ch->timer);
1248  while ((skb = skb_dequeue(&ch->io_queue))) {
1249  priv->stats.tx_packets++;
1250  priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
1251  if (first) {
1252  priv->stats.tx_bytes += 2;
1253  first = 0;
1254  }
1255  atomic_dec(&skb->users);
1256  dev_kfree_skb_irq(skb);
1257  }
1258  spin_lock(&ch->collect_lock);
1259  clear_normalized_cda(&ch->ccw[4]);
1260  if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
1261  spin_unlock(&ch->collect_lock);
1262  fsm_newstate(fi, CTC_STATE_TXIDLE);
1263  goto done;
1264  }
1265 
1266  if (ctcm_checkalloc_buffer(ch)) {
1267  spin_unlock(&ch->collect_lock);
1268  goto done;
1269  }
1270  ch->trans_skb->data = ch->trans_skb_data;
1271  skb_reset_tail_pointer(ch->trans_skb);
1272  ch->trans_skb->len = 0;
1273  if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
1274  ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
1275  if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
1276  ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
1277  i = 0;
1278  p_header = NULL;
1279  data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
1280 
1281  CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
1282  " data_space:%04x\n",
1283  __func__, data_space);
1284 
1285  while ((skb = skb_dequeue(&ch->collect_queue))) {
1286  memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
1287  p_header = (struct pdu *)
1288  (skb_tail_pointer(ch->trans_skb) - skb->len);
1289  p_header->pdu_flag = 0x00;
1290  if (skb->protocol == ntohs(ETH_P_SNAP))
1291  p_header->pdu_flag |= 0x60;
1292  else
1293  p_header->pdu_flag |= 0x20;
1294 
1295  CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1296  __func__, ch->trans_skb->len);
1297  CTCM_PR_DBGDATA("%s: pdu header and data for up"
1298  " to 32 bytes sent to vtam\n", __func__);
1299  CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
1300 
1301  ch->collect_len -= skb->len;
1302  data_space -= skb->len;
1303  priv->stats.tx_packets++;
1304  priv->stats.tx_bytes += skb->len;
1305  atomic_dec(&skb->users);
1306  dev_kfree_skb_any(skb);
1307  peekskb = skb_peek(&ch->collect_queue);
1308  if (!peekskb || peekskb->len > data_space)
1309  break;
1310  i++;
1311  }
1312  /* p_header points to the last one we handled */
1313  if (p_header)
1314  p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/
1315  header = kzalloc(TH_HEADER_LENGTH, gfp_type());
1316  if (!header) {
1317  spin_unlock(&ch->collect_lock);
1318  fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1319  goto done;
1320  }
1321  header->th_ch_flag = TH_HAS_PDU; /* Normal data */
1322  ch->th_seq_num++;
1323  header->th_seq_num = ch->th_seq_num;
1324 
1325  CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
1326  __func__, ch->th_seq_num);
1327 
1328  memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
1329  TH_HEADER_LENGTH); /* put the TH on the packet */
1330 
1331  kfree(header);
1332 
1333  CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1334  __func__, ch->trans_skb->len);
1335  CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
1336  "data to vtam from collect_q\n", __func__);
1337  CTCM_D3_DUMP((char *)ch->trans_skb->data,
1338  min_t(int, ch->trans_skb->len, 50));
1339 
1340  spin_unlock(&ch->collect_lock);
1341  clear_normalized_cda(&ch->ccw[1]);
1342 
1343  CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
1344  (void *)(unsigned long)ch->ccw[1].cda,
1345  ch->trans_skb->data);
1346  ch->ccw[1].count = ch->max_bufsize;
1347 
1348  if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1349  dev_kfree_skb_any(ch->trans_skb);
1350  ch->trans_skb = NULL;
1351  CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
1352  "%s: %s: IDAL alloc failed",
1353  CTCM_FUNTAIL, ch->id);
1354  fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1355  return;
1356  }
1357 
1358  CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
1359  (void *)(unsigned long)ch->ccw[1].cda,
1360  ch->trans_skb->data);
1361 
1362  ch->ccw[1].count = ch->trans_skb->len;
1363  fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1364  ch->prof.send_stamp = current_kernel_time(); /* xtime */
1365  if (do_debug_ccw)
1366  ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1367  rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1368  (unsigned long)ch, 0xff, 0);
1369  ch->prof.doios_multi++;
1370  if (rc != 0) {
1371  priv->stats.tx_dropped += i;
1372  priv->stats.tx_errors += i;
1373  fsm_deltimer(&ch->timer);
1374  ctcm_ccw_check_rc(ch, rc, "chained TX");
1375  }
1376 done:
1377  ctcm_clear_busy(dev);
1378  return;
1379 }
1380 
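/*
 * MPC variant of the receive action: copy the received block into a
 * newly allocated skb, queue it for the channel tasklet according to
 * the MPC group state and, if the group is operational, restart the
 * read.
 */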
1389 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1390 {
1391  struct channel *ch = arg;
1392  struct net_device *dev = ch->netdev;
1393  struct ctcm_priv *priv = dev->ml_priv;
1394  struct mpc_group *grp = priv->mpcg;
1395  struct sk_buff *skb = ch->trans_skb;
1396  struct sk_buff *new_skb;
1397  unsigned long saveflags = 0; /* avoids compiler warning */
1398  int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
1399 
1400  CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
1401  __func__, dev->name, smp_processor_id(),
1402  ch->id, ch->max_bufsize, len);
1403  fsm_deltimer(&ch->timer);
1404 
1405  if (skb == NULL) {
1406  CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1407  "%s(%s): TRANS_SKB = NULL",
1408  CTCM_FUNTAIL, dev->name);
1409  goto again;
1410  }
1411 
1412  if (len < TH_HEADER_LENGTH) {
1413  CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1414  "%s(%s): packet length %d too short",
1415  CTCM_FUNTAIL, dev->name, len);
1416  priv->stats.rx_dropped++;
1417  priv->stats.rx_length_errors++;
1418  } else {
1419  /* must have valid th header or game over */
1420  __u32 block_len = len;
1421  len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
1422  new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
1423 
1424  if (new_skb == NULL) {
1425  CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1426  "%s(%s): skb allocation failed",
1427  CTCM_FUNTAIL, dev->name);
1428  fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1429  goto again;
1430  }
1431  switch (fsm_getstate(grp->fsm)) {
1432  case MPCG_STATE_RESET:
1433  case MPCG_STATE_INOP:
1434  dev_kfree_skb_any(new_skb);
1435  break;
1436  case MPCG_STATE_FLOWC:
1437  case MPCG_STATE_READY:
1438  memcpy(skb_put(new_skb, block_len),
1439  skb->data, block_len);
1440  skb_queue_tail(&ch->io_queue, new_skb);
1441  tasklet_schedule(&ch->ch_tasklet);
1442  break;
1443  default:
1444  memcpy(skb_put(new_skb, len), skb->data, len);
1445  skb_queue_tail(&ch->io_queue, new_skb);
1446  tasklet_hi_schedule(&ch->ch_tasklet);
1447  break;
1448  }
1449  }
1450 
1451 again:
1452  switch (fsm_getstate(grp->fsm)) {
1453  int rc, dolock;
1454  case MPCG_STATE_FLOWC:
1455  case MPCG_STATE_READY:
1456  if (ctcm_checkalloc_buffer(ch))
1457  break;
1458  ch->trans_skb->data = ch->trans_skb_data;
1459  skb_reset_tail_pointer(ch->trans_skb);
1460  ch->trans_skb->len = 0;
1461  ch->ccw[1].count = ch->max_bufsize;
1462  if (do_debug_ccw)
1463  ctcmpc_dumpit((char *)&ch->ccw[0],
1464  sizeof(struct ccw1) * 3);
1465  dolock = !in_irq();
1466  if (dolock)
1467  spin_lock_irqsave(
1468  get_ccwdev_lock(ch->cdev), saveflags);
1469  rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1470  (unsigned long)ch, 0xff, 0);
1471  if (dolock) /* see remark about conditional locking */
1472  spin_unlock_irqrestore(
1473  get_ccwdev_lock(ch->cdev), saveflags);
1474  if (rc != 0)
1475  ctcm_ccw_check_rc(ch, rc, "normal RX");
1476  default:
1477  break;
1478  }
1479 
1480  CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
1481  __func__, dev->name, ch, ch->id);
1482 
1483 }
1484 
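/*
 * MPC variant of the first-I/O action: from STARTRETRY/SETUPWAIT go
 * straight to RX/TX idle, otherwise just enter the RX/TX init state;
 * the XID exchange itself is driven by the MPC group statemachine.
 */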
1492 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1493 {
1494  struct channel *ch = arg;
1495  struct net_device *dev = ch->netdev;
1496  struct ctcm_priv *priv = dev->ml_priv;
1497  struct mpc_group *gptr = priv->mpcg;
1498 
1499  CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
1500  __func__, ch->id, ch);
1501 
1502  CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
1503  "%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
1504  CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
1505  fsm_getstate(gptr->fsm), ch->protocol);
1506 
1507  if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
1508  MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
1509 
1510  fsm_deltimer(&ch->timer);
1511  if (ctcm_checkalloc_buffer(ch))
1512  goto done;
1513 
1514  switch (fsm_getstate(fi)) {
1515  case CTC_STATE_STARTRETRY:
1516  case CTC_STATE_SETUPWAIT:
1517  if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
1518  ctcmpc_chx_rxidle(fi, event, arg);
1519  } else {
1520  fsm_newstate(fi, CTC_STATE_TXIDLE);
1521  fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
1522  }
1523  goto done;
1524  default:
1525  break;
1526  }
1527 
1528  fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1529  ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
1530 
1531 done:
1532  CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
1533  __func__, ch->id, ch);
1534  return;
1535 }
1536 
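/*
 * MPC variant of chx_rxidle: XID processing is complete, so enter
 * RXIDLE and, if the MPC group is operational, restart the read before
 * reporting "RX up" to the device statemachine.
 */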
1546 void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
1547 {
1548  struct channel *ch = arg;
1549  struct net_device *dev = ch->netdev;
1550  struct ctcm_priv *priv = dev->ml_priv;
1551  struct mpc_group *grp = priv->mpcg;
1552  int rc;
1553  unsigned long saveflags = 0; /* avoids compiler warning */
1554 
1555  fsm_deltimer(&ch->timer);
1556  CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
1557  __func__, ch->id, dev->name, smp_processor_id(),
1558  fsm_getstate(fi), fsm_getstate(grp->fsm));
1559 
1560  fsm_newstate(fi, CTC_STATE_RXIDLE);
1561  /* XID processing complete */
1562 
1563  switch (fsm_getstate(grp->fsm)) {
1564  case MPCG_STATE_FLOWC:
1565  case MPCG_STATE_READY:
1566  if (ctcm_checkalloc_buffer(ch))
1567  goto done;
1568  ch->trans_skb->data = ch->trans_skb_data;
1569  skb_reset_tail_pointer(ch->trans_skb);
1570  ch->trans_skb->len = 0;
1571  ch->ccw[1].count = ch->max_bufsize;
1572  CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1573  if (event == CTC_EVENT_START)
1574  /* see remark about conditional locking */
1575  spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1576  rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1577  (unsigned long)ch, 0xff, 0);
1578  if (event == CTC_EVENT_START)
1579  spin_unlock_irqrestore(
1580  get_ccwdev_lock(ch->cdev), saveflags);
1581  if (rc != 0) {
1582  fsm_newstate(fi, CTC_STATE_RXINIT);
1583  ctcm_ccw_check_rc(ch, rc, "initial RX");
1584  goto done;
1585  }
1586  break;
1587  default:
1588  break;
1589  }
1590 
1591  fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
1592 done:
1593  return;
1594 }
1595 
1596 /*
1597  * ctcmpc channel FSM action
1598  * called from several points in ctcmpc_ch_fsm
1599  * ctcmpc only
1600  */
1601 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
1602 {
1603  struct channel *ch = arg;
1604  struct net_device *dev = ch->netdev;
1605  struct ctcm_priv *priv = dev->ml_priv;
1606  struct mpc_group *grp = priv->mpcg;
1607 
1608  CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
1609  __func__, dev->name, ch->id, ch, smp_processor_id(),
1610  fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1611 
1612  switch (fsm_getstate(grp->fsm)) {
1613  case MPCG_STATE_XID2INITW:
1614  /* ok..start yside xid exchanges */
1615  if (!ch->in_mpcgroup)
1616  break;
1617  if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
1618  fsm_deltimer(&grp->timer);
1619  fsm_addtimer(&grp->timer,
1620  MPC_XID_TIMEOUT_VALUE,
1621  MPCG_EVENT_TIMER, dev);
1622  fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1623 
1624  } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1625  /* attn rcvd before xid0 processed via bh */
1626  fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1627  break;
1628  case MPCG_STATE_XID2INITX:
1629  case MPCG_STATE_XID0IOWAIT:
1630  case MPCG_STATE_XID0IOWAIX:
1631  /* attn rcvd before xid0 processed on ch
1632  but mid-xid0 processing for group */
1633  if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1634  fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1635  break;
1636  case MPCG_STATE_XID7INITW:
1637  case MPCG_STATE_XID7INITX:
1638  case MPCG_STATE_XID7INITI:
1639  case MPCG_STATE_XID7INITZ:
1640  switch (fsm_getstate(ch->fsm)) {
1641  case CH_XID7_PENDING:
1642  fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1643  break;
1644  case CH_XID7_PENDING2:
1645  fsm_newstate(ch->fsm, CH_XID7_PENDING3);
1646  break;
1647  }
1648  fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
1649  break;
1650  }
1651 
1652  return;
1653 }
1654 
1655 /*
1656  * ctcmpc channel FSM action
1657  * called from one point in ctcmpc_ch_fsm
1658  * ctcmpc only
1659  */
1660 static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
1661 {
1662  struct channel *ch = arg;
1663  struct net_device *dev = ch->netdev;
1664  struct ctcm_priv *priv = dev->ml_priv;
1665  struct mpc_group *grp = priv->mpcg;
1666 
1667  CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
1668  __func__, dev->name, ch->id,
1669  fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1670 
1671  fsm_deltimer(&ch->timer);
1672 
1673  switch (fsm_getstate(grp->fsm)) {
1674  case MPCG_STATE_XID0IOWAIT:
1675  /* vtam wants to be primary.start yside xid exchanges*/
1676  /* only receive one attn-busy at a time so must not */
1677  /* change state each time */
1678  grp->changed_side = 1;
1679  fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1680  break;
1681  case MPCG_STATE_XID2INITW:
1682  if (grp->changed_side == 1) {
1683  grp->changed_side = 2;
1684  break;
1685  }
1686  /* process began via call to establish_conn */
1687  /* so must report failure instead of reverting */
1688  /* back to ready-for-xid passive state */
1689  if (grp->estconnfunc)
1690  goto done;
1691  /* this attnbusy is NOT the result of xside xid */
1692  /* collisions so yside must have been triggered */
1693  /* by an ATTN that was not intended to start XID */
1694  /* processing. Revert back to ready-for-xid and */
1695  /* wait for ATTN interrupt to signal xid start */
1696  if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
1697  fsm_newstate(ch->fsm, CH_XID0_PENDING) ;
1698  fsm_deltimer(&grp->timer);
1699  goto done;
1700  }
1701  fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1702  goto done;
1703  case MPCG_STATE_XID2INITX:
1704  /* XID2 was received before ATTN Busy for second
1705  channel.Send yside xid for second channel.
1706  */
1707  if (grp->changed_side == 1) {
1708  grp->changed_side = 2;
1709  break;
1710  }
1711  case MPCG_STATE_XID0IOWAIX:
1712  case MPCG_STATE_XID7INITW:
1713  case MPCG_STATE_XID7INITX:
1714  case MPCG_STATE_XID7INITI:
1715  case MPCG_STATE_XID7INITZ:
1716  default:
1717  /* multiple attn-busy indicates too out-of-sync */
1718  /* and they are certainly not being received as part */
1719  /* of valid mpc group negotiations.. */
1720  fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1721  goto done;
1722  }
1723 
1724  if (grp->changed_side == 1) {
1725  fsm_deltimer(&grp->timer);
1726  fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
1727  MPCG_EVENT_TIMER, dev);
1728  }
1729  if (ch->in_mpcgroup)
1730  fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1731  else
1732  CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1733  "%s(%s): channel %s not added to group",
1734  CTCM_FUNTAIL, dev->name, ch->id);
1735 
1736 done:
1737  return;
1738 }
1739 
1740 /*
1741  * ctcmpc channel FSM action
1742  * called from several points in ctcmpc_ch_fsm
1743  * ctcmpc only
1744  */
1745 static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
1746 {
1747  struct channel *ch = arg;
1748  struct net_device *dev = ch->netdev;
1749  struct ctcm_priv *priv = dev->ml_priv;
1750  struct mpc_group *grp = priv->mpcg;
1751 
1752  fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1753  return;
1754 }
1755 
1756 /*
1757  * ctcmpc channel FSM action
1758  * called from several points in ctcmpc_ch_fsm
1759  * ctcmpc only
1760  */
1761 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
1762 {
1763  struct channel *ach = arg;
1764  struct net_device *dev = ach->netdev;
1765  struct ctcm_priv *priv = dev->ml_priv;
1766  struct mpc_group *grp = priv->mpcg;
1767  struct channel *wch = priv->channel[CTCM_WRITE];
1768  struct channel *rch = priv->channel[CTCM_READ];
1769  struct sk_buff *skb;
1770  struct th_sweep *header;
1771  int rc = 0;
1772  unsigned long saveflags = 0;
1773 
1774  CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
1775  __func__, smp_processor_id(), ach, ach->id);
1776 
1777  if (grp->in_sweep == 0)
1778  goto done;
1779 
1780  CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" ,
1781  __func__, wch->th_seq_num);
1782  CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" ,
1783  __func__, rch->th_seq_num);
1784 
1785  if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
1786  /* give the previous IO time to complete */
1787  fsm_addtimer(&wch->sweep_timer,
1788  200, CTC_EVENT_RSWEEP_TIMER, wch);
1789  goto done;
1790  }
1791 
1792  skb = skb_dequeue(&wch->sweep_queue);
1793  if (!skb)
1794  goto done;
1795 
1796  if (set_normalized_cda(&wch->ccw[4], skb->data)) {
1797  grp->in_sweep = 0;
1798  ctcm_clear_busy_do(dev);
1799  dev_kfree_skb_any(skb);
1800  fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1801  goto done;
1802  } else {
1803  atomic_inc(&skb->users);
1804  skb_queue_tail(&wch->io_queue, skb);
1805  }
1806 
1807  /* send out the sweep */
1808  wch->ccw[4].count = skb->len;
1809 
1810  header = (struct th_sweep *)skb->data;
1811  switch (header->th.th_ch_flag) {
1812  case TH_SWEEP_REQ:
1813  grp->sweep_req_pend_num--;
1814  break;
1815  case TH_SWEEP_RESP:
1816  grp->sweep_rsp_pend_num--;
1817  break;
1818  }
1819 
1820  header->sw.th_last_seq = wch->th_seq_num;
1821 
1822  CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
1823  CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
1824  CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
1825 
1826  fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
1827  fsm_newstate(wch->fsm, CTC_STATE_TX);
1828 
1829  spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
1830  wch->prof.send_stamp = current_kernel_time(); /* xtime */
1831  rc = ccw_device_start(wch->cdev, &wch->ccw[3],
1832  (unsigned long) wch, 0xff, 0);
1833  spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
1834 
1835  if ((grp->sweep_req_pend_num == 0) &&
1836  (grp->sweep_rsp_pend_num == 0)) {
1837  grp->in_sweep = 0;
1838  rch->th_seq_num = 0x00;
1839  wch->th_seq_num = 0x00;
1840  ctcm_clear_busy_do(dev);
1841  }
1842 
1843  CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" ,
1844  __func__, wch->th_seq_num, rch->th_seq_num);
1845 
1846  if (rc != 0)
1847  ctcm_ccw_check_rc(wch, rc, "send sweep");
1848 
1849 done:
1850  return;
1851 }
1852 
1853 
1854 /*
1855  * The ctcmpc statemachine for a channel.
1856  */
1857 
1858  const fsm_node ctcmpc_ch_fsm[] = {
1859  { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
1860  { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
1861  { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1862  { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
1863  { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1864 
1865  { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
1866  { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
1867  { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
1868  { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1869  { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
1870  { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop },
1871  { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop },
1872  { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1873 
1874  { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1875  { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
1876  { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1877  { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
1878  { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1879  { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1880 
1881  { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
1882  { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
1883  { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1884  { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1885  { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1886 
1887  { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1888  { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
1889  { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
1890  { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1891  { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1892  { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
1893  { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1894  { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1895 
1896  { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1897  { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
1898  { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle },
1899  { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
1900  { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
1901  { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
1902  { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
1903  { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1904  { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio },
1905  { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1906 
1907  { CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop },
1908  { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1909  { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1910  { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop },
1911  { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1912  { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1913  { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1914  { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1915  { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1916  { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1917 
1918  { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1919  { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1920  { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio },
1921  { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop },
1922  { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1923  { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1924  { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1925  { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1926  { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy },
1927  { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1928  { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1929 
1930  { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1931  { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1932  { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1933  { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop },
1934  { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1935  { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1936  { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1937  { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1938  { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1939  { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1940  { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1941  { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1942  { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1943 
1944  { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1945  { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1946  { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio },
1947  { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop },
1948  { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1949  { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1950  { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1951  { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1952  { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1953  { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1954  { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1955  { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1956 
1957  { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1958  { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1959  { CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio },
1960  { CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop },
1961  { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1962  { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1963  { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1964  { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1965  { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1966  { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1967  { CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1968  { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1969 
1970  { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1971  { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1972  { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio },
1973  { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop },
1974  { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1975  { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1976  { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1977  { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1978  { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1979  { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1980  { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1981  { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1982 
1983  { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1984  { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1985  { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio },
1986  { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop },
1987  { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1988  { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1989  { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1990  { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1991  { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1992  { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1993  { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1994  { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1995 
1996  { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1997  { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
1998  { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1999  { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
2000  { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2001  { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2002  { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2003  { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2004 
2005  { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
2006  { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
2007  { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
2008  { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
2009  { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
2010  { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
2011  { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2012  { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2013  { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2014 
2015  { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
2016  { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
2017  { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
2018  { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2019  { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2020  { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2021  { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2022  { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2023 
2024  { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
2025  { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
2026  { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
2027  { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2028  { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2029  { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2030  { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2031  { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2032 
2033  { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
2034  { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
2035  { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
2036  { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2037  { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2038  { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2039  { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2040 
2041  { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
2042  { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
2043  { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone },
2044  { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2045  { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2046  { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
2047  { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2048  { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2049  { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2050  { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2051 
2052  { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2053  { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2054  { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2055  { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2056  { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2057 };
2058 
2059 int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
2060 
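The table above is consumed by the generic statemachine helper declared in fsm.h: init_fsm() builds a jump table from the { state, event, action } triples, and fsm_event() later dispatches interrupt and driver events through it. A minimal sketch of how a channel would be wired to this table, assuming the CTC_MPC_NR_STATES/CTC_MPC_NR_EVENTS count macros from ctcm_fsms.h and a call site similar to the channel setup in ctcm_main.c (the function and variable names below are illustrative only):

 /* Sketch only -- not part of ctcm_fsms.c; count macros are assumptions. */
 static int example_wire_mpc_channel_fsm(struct channel *ch)
 {
 	/* build the per-channel FSM instance from ctcmpc_ch_fsm[] */
 	ch->fsm = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
 			   CTC_MPC_NR_STATES, CTC_MPC_NR_EVENTS,
 			   ctcmpc_ch_fsm, mpc_ch_fsm_len, GFP_KERNEL);
 	if (ch->fsm == NULL)
 		return -ENOMEM;

 	fsm_newstate(ch->fsm, CTC_STATE_IDLE);

 	/* later, e.g. from the ccw interrupt handler: dispatch an event */
 	fsm_event(ch->fsm, CTC_EVENT_START, ch);
 	return 0;
 }
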
2061 /*
2062  * Actions for interface - statemachine.
2063  */
2064 
2065 /*
2066  * Startup channels by sending CTC_EVENT_START to each channel.
2067  *
2068  * fi		An instance of an interface statemachine.
2069  * event	The event, just happened.
2070  * arg		Generic pointer, cast from struct net_device * upon call.
2071  */
2072 static void dev_action_start(fsm_instance *fi, int event, void *arg)
2073 {
2074  struct net_device *dev = arg;
2075  struct ctcm_priv *priv = dev->ml_priv;
2076  int direction;
2077 
2078  CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2079 
2080  fsm_deltimer(&priv->restart_timer);
2081  fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2082  if (IS_MPC(priv))
2083  priv->mpcg->channels_terminating = 0;
2084  for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2085  struct channel *ch = priv->channel[direction];
2086  fsm_event(ch->fsm, CTC_EVENT_START, ch);
2087  }
2088 }
2089 
2090 /*
2091  * Shutdown channels by sending CTC_EVENT_STOP to each channel.
2092  *
2093  * fi		An instance of an interface statemachine.
2094  * event	The event, just happened.
2095  * arg		Generic pointer, cast from struct net_device * upon call.
2096  */
2097 static void dev_action_stop(fsm_instance *fi, int event, void *arg)
2098 {
2099  int direction;
2100  struct net_device *dev = arg;
2101  struct ctcm_priv *priv = dev->ml_priv;
2102 
2103  CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2104 
2105  fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2106  for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2107  struct channel *ch = priv->channel[direction];
2108  fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2109  ch->th_seq_num = 0x00;
2110  CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
2111  __func__, ch->th_seq_num);
2112  }
2113  if (IS_MPC(priv))
2114  fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2115 }
2116 
2117 static void dev_action_restart(fsm_instance *fi, int event, void *arg)
2118 {
2119  int restart_timer;
2120  struct net_device *dev = arg;
2121  struct ctcm_priv *priv = dev->ml_priv;
2122 
2123  CTCMY_DBF_DEV_NAME(TRACE, dev, "");
2124 
2125  if (IS_MPC(priv)) {
2126  restart_timer = CTCM_TIME_1_SEC;
2127  } else {
2128  restart_timer = CTCM_TIME_5_SEC;
2129  }
2130  dev_info(&dev->dev, "Restarting device\n");
2131 
2132  dev_action_stop(fi, event, arg);
2133  fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
2134  if (IS_MPC(priv))
2135  fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2136 
2137  /* going back into start sequence too quickly can */
2138  /* result in the other side becoming unreachable due */
2139  /* to sense reported when IO is aborted */
2140  fsm_addtimer(&priv->restart_timer, restart_timer,
2141  DEV_EVENT_START, dev);
2142 }
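
The delayed restart above leans on the timer helpers from fsm.h. A minimal sketch of the pattern, assuming the restart timer was bound to the interface statemachine during device setup (the binding call site is outside this file):

 /* Sketch only -- assumed call sites, not part of ctcm_fsms.c. */
 fsm_settimer(priv->fsm, &priv->restart_timer);	/* bind timer to the FSM once */

 /* as in dev_action_restart(): post DEV_EVENT_START after the delay */
 fsm_addtimer(&priv->restart_timer, CTCM_TIME_5_SEC, DEV_EVENT_START, dev);

 /* dev_action_start() cancels any pending restart before starting channels */
 fsm_deltimer(&priv->restart_timer);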
2143 
2144 /*
2145  * Called from channel statemachine
2146  * when a channel is up and running.
2147  *
2148  * fi		An instance of an interface statemachine.
2149  * event	The event, just happened.
2150  * arg		Generic pointer, cast from struct net_device * upon call.
2151  */
2152 static void dev_action_chup(fsm_instance *fi, int event, void *arg)
2153 {
2154  struct net_device *dev = arg;
2155  struct ctcm_priv *priv = dev->ml_priv;
2156  int dev_stat = fsm_getstate(fi);
2157 
2158  CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
2159  "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
2160  dev->name, dev->ml_priv, dev_stat, event);
2161 
2162  switch (fsm_getstate(fi)) {
2163  case DEV_STATE_STARTWAIT_RXTX:
2164  if (event == DEV_EVENT_RXUP)
2165  fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2166  else
2167  fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2168  break;
2169  case DEV_STATE_STARTWAIT_RX:
2170  if (event == DEV_EVENT_RXUP) {
2171  fsm_newstate(fi, DEV_STATE_RUNNING);
2172  dev_info(&dev->dev,
2173  "Connected with remote side\n");
2174  ctcm_clear_busy(dev);
2175  }
2176  break;
2177  case DEV_STATE_STARTWAIT_TX:
2178  if (event == DEV_EVENT_TXUP) {
2179  fsm_newstate(fi, DEV_STATE_RUNNING);
2180  dev_info(&dev->dev,
2181  "Connected with remote side\n");
2182  ctcm_clear_busy(dev);
2183  }
2184  break;
2185  case DEV_STATE_STOPWAIT_TX:
2186  if (event == DEV_EVENT_RXUP)
2187  fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2188  break;
2189  case DEV_STATE_STOPWAIT_RX:
2190  if (event == DEV_EVENT_TXUP)
2191  fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2192  break;
2193  }
2194 
2195  if (IS_MPC(priv)) {
2196  if (event == DEV_EVENT_RXUP)
2197  mpc_channel_action(priv->channel[CTCM_READ],
2198  CTCM_READ, MPC_CHANNEL_ADD);
2199  else
2200  mpc_channel_action(priv->channel[CTCM_WRITE],
2201  CTCM_WRITE, MPC_CHANNEL_ADD);
2202  }
2203 }
2204 
2205 /*
2206  * Called from channel statemachine
2207  * when a channel has been shutdown.
2208  *
2209  * fi		An instance of an interface statemachine.
2210  * event	The event, just happened.
2211  * arg		Generic pointer, cast from struct net_device * upon call.
2212  */
2213 static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
2214 {
2215 
2216  struct net_device *dev = arg;
2217  struct ctcm_priv *priv = dev->ml_priv;
2218 
2219  CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2220 
2221  switch (fsm_getstate(fi)) {
2222  case DEV_STATE_RUNNING:
2223  if (event == DEV_EVENT_TXDOWN)
2224  fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2225  else
2226  fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2227  break;
2228  case DEV_STATE_STARTWAIT_RX:
2229  if (event == DEV_EVENT_TXDOWN)
2230  fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2231  break;
2232  case DEV_STATE_STARTWAIT_TX:
2233  if (event == DEV_EVENT_RXDOWN)
2234  fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2235  break;
2236  case DEV_STATE_STOPWAIT_RXTX:
2237  if (event == DEV_EVENT_TXDOWN)
2238  fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2239  else
2240  fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2241  break;
2242  case DEV_STATE_STOPWAIT_RX:
2243  if (event == DEV_EVENT_RXDOWN)
2244  fsm_newstate(fi, DEV_STATE_STOPPED);
2245  break;
2246  case DEV_STATE_STOPWAIT_TX:
2247  if (event == DEV_EVENT_TXDOWN)
2248  fsm_newstate(fi, DEV_STATE_STOPPED);
2249  break;
2250  }
2251  if (IS_MPC(priv)) {
2252  if (event == DEV_EVENT_RXDOWN)
2253  mpc_channel_action(priv->channel[CTCM_READ],
2254  CTCM_READ, MPC_CHANNEL_REMOVE);
2255  else
2256  mpc_channel_action(priv->channel[CTCM_WRITE],
2257  CTCM_WRITE, MPC_CHANNEL_REMOVE);
2258  }
2259 }
2260 
2261 const fsm_node dev_fsm[] = {
2262  { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
2263  { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2264  { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2265  { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2266  { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2267  { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2268  { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2269  { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2270  { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2271  { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2272  { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2273  { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2274  { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2275  { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2276  { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2277  { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2278  { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2279  { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2280  { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2281  { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2282  { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2283  { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2284  { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2285  { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2286  { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2287  { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2288  { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2289  { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2290  { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2291  { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2292  { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2293  { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2294  { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2295  { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2296  { DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop },
2297  { DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop },
2298  { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2299 };
2300 
2301 int dev_fsm_len = ARRAY_SIZE(dev_fsm);
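
For completeness, a minimal sketch of how the interface statemachine defined by dev_fsm[] would be created and driven. The CTCM_NR_DEV_STATES/CTCM_NR_DEV_EVENTS count macros and the call sites are assumptions; the real code lives in ctcm_main.c:

 /* Sketch only -- not part of ctcm_fsms.c. */
 static int example_dev_fsm_setup(struct net_device *dev)
 {
 	struct ctcm_priv *priv = dev->ml_priv;

 	/* build the per-device FSM instance from dev_fsm[] */
 	priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
 			     CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
 			     dev_fsm, dev_fsm_len, GFP_KERNEL);
 	if (priv->fsm == NULL)
 		return -ENOMEM;
 	fsm_newstate(priv->fsm, DEV_STATE_STOPPED);

 	/* netdev open path: kick the Start transition from dev_fsm[] */
 	fsm_event(priv->fsm, DEV_EVENT_START, dev);
 	return 0;
 }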
2302 
2303 /* --- This is the END my friend --- */
2304