
/drivers/s390/net/ctcm_fsms.c

https://bitbucket.org/wisechild/galaxy-nexus
C | 2295 lines | 1697 code | 220 blank | 378 comment | 256 complexity
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0
  1. /*
  2. * drivers/s390/net/ctcm_fsms.c
  3. *
  4. * Copyright IBM Corp. 2001, 2007
  5. * Authors: Fritz Elfert (felfert@millenux.com)
  6. * Peter Tiedemann (ptiedem@de.ibm.com)
  7. * MPC additions :
  8. * Belinda Thompson (belindat@us.ibm.com)
  9. * Andy Richter (richtera@us.ibm.com)
  10. */
  11. #undef DEBUG
  12. #undef DEBUGDATA
  13. #undef DEBUGCCW
  14. #define KMSG_COMPONENT "ctcm"
  15. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  16. #include <linux/module.h>
  17. #include <linux/init.h>
  18. #include <linux/kernel.h>
  19. #include <linux/slab.h>
  20. #include <linux/errno.h>
  21. #include <linux/types.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/timer.h>
  24. #include <linux/bitops.h>
  25. #include <linux/signal.h>
  26. #include <linux/string.h>
  27. #include <linux/ip.h>
  28. #include <linux/if_arp.h>
  29. #include <linux/tcp.h>
  30. #include <linux/skbuff.h>
  31. #include <linux/ctype.h>
  32. #include <net/dst.h>
  33. #include <linux/io.h>
  34. #include <asm/ccwdev.h>
  35. #include <asm/ccwgroup.h>
  36. #include <linux/uaccess.h>
  37. #include <asm/idals.h>
  38. #include "fsm.h"
  39. #include "ctcm_dbug.h"
  40. #include "ctcm_main.h"
  41. #include "ctcm_fsms.h"
  42. const char *dev_state_names[] = {
  43. [DEV_STATE_STOPPED] = "Stopped",
  44. [DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
  45. [DEV_STATE_STARTWAIT_RX] = "StartWait RX",
  46. [DEV_STATE_STARTWAIT_TX] = "StartWait TX",
  47. [DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
  48. [DEV_STATE_STOPWAIT_RX] = "StopWait RX",
  49. [DEV_STATE_STOPWAIT_TX] = "StopWait TX",
  50. [DEV_STATE_RUNNING] = "Running",
  51. };
  52. const char *dev_event_names[] = {
  53. [DEV_EVENT_START] = "Start",
  54. [DEV_EVENT_STOP] = "Stop",
  55. [DEV_EVENT_RXUP] = "RX up",
  56. [DEV_EVENT_TXUP] = "TX up",
  57. [DEV_EVENT_RXDOWN] = "RX down",
  58. [DEV_EVENT_TXDOWN] = "TX down",
  59. [DEV_EVENT_RESTART] = "Restart",
  60. };
  61. const char *ctc_ch_event_names[] = {
  62. [CTC_EVENT_IO_SUCCESS] = "ccw_device success",
  63. [CTC_EVENT_IO_EBUSY] = "ccw_device busy",
  64. [CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
  65. [CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
  66. [CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
  67. [CTC_EVENT_ATTN] = "Status ATTN",
  68. [CTC_EVENT_BUSY] = "Status BUSY",
  69. [CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
  70. [CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
  71. [CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
  72. [CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
  73. [CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
  74. [CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
  75. [CTC_EVENT_UC_ZERO] = "Unit check ZERO",
  76. [CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
  77. [CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
  78. [CTC_EVENT_MC_FAIL] = "Machine check failure",
  79. [CTC_EVENT_MC_GOOD] = "Machine check operational",
  80. [CTC_EVENT_IRQ] = "IRQ normal",
  81. [CTC_EVENT_FINSTAT] = "IRQ final",
  82. [CTC_EVENT_TIMER] = "Timer",
  83. [CTC_EVENT_START] = "Start",
  84. [CTC_EVENT_STOP] = "Stop",
  85. /*
  86. * additional MPC events
  87. */
  88. [CTC_EVENT_SEND_XID] = "XID Exchange",
  89. [CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
  90. };
  91. const char *ctc_ch_state_names[] = {
  92. [CTC_STATE_IDLE] = "Idle",
  93. [CTC_STATE_STOPPED] = "Stopped",
  94. [CTC_STATE_STARTWAIT] = "StartWait",
  95. [CTC_STATE_STARTRETRY] = "StartRetry",
  96. [CTC_STATE_SETUPWAIT] = "SetupWait",
  97. [CTC_STATE_RXINIT] = "RX init",
  98. [CTC_STATE_TXINIT] = "TX init",
  99. [CTC_STATE_RX] = "RX",
  100. [CTC_STATE_TX] = "TX",
  101. [CTC_STATE_RXIDLE] = "RX idle",
  102. [CTC_STATE_TXIDLE] = "TX idle",
  103. [CTC_STATE_RXERR] = "RX error",
  104. [CTC_STATE_TXERR] = "TX error",
  105. [CTC_STATE_TERM] = "Terminating",
  106. [CTC_STATE_DTERM] = "Restarting",
  107. [CTC_STATE_NOTOP] = "Not operational",
  108. /*
  109. * additional MPC states
  110. */
  111. [CH_XID0_PENDING] = "Pending XID0 Start",
  112. [CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
  113. [CH_XID7_PENDING] = "Pending XID7 P1 Start",
  114. [CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
  115. [CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
  116. [CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
  117. [CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
  118. };
  119. static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
  120. /*
  121. * ----- static ctcm actions for channel statemachine -----
  122. *
  123. */
  124. static void chx_txdone(fsm_instance *fi, int event, void *arg);
  125. static void chx_rx(fsm_instance *fi, int event, void *arg);
  126. static void chx_rxidle(fsm_instance *fi, int event, void *arg);
  127. static void chx_firstio(fsm_instance *fi, int event, void *arg);
  128. static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
  129. static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
  130. static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
  131. static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
  132. static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
  133. static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
  134. static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
  135. static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
  136. static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
  137. static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
  138. static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
  139. static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
  140. static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
  141. static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
  142. /*
  143. * ----- static ctcmpc actions for ctcmpc channel statemachine -----
  144. *
  145. */
  146. static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
  147. static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
  148. static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
  149. /* shared :
  150. static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
  151. static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
  152. static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
  153. static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
  154. static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
  155. static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
  156. static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
  157. static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
  158. static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
  159. static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
  160. static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
  161. static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
  162. static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
  163. static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
  164. */
  165. static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
  166. static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
  167. static void ctcmpc_chx_resend(fsm_instance *, int, void *);
  168. static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
  169. /**
  170. * Check return code of a preceding ccw_device call, halt_IO etc...
  171. *
  172. * ch : The channel the error belongs to.
  173. * rc : The non-zero return code to inspect; msg describes the failed operation.
  174. */
  175. void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
  176. {
  177. CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
  178. "%s(%s): %s: %04x\n",
  179. CTCM_FUNTAIL, ch->id, msg, rc);
  180. switch (rc) {
  181. case -EBUSY:
  182. pr_info("%s: The communication peer is busy\n",
  183. ch->id);
  184. fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
  185. break;
  186. case -ENODEV:
  187. pr_err("%s: The specified target device is not valid\n",
  188. ch->id);
  189. fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
  190. break;
  191. default:
  192. pr_err("An I/O operation resulted in error %04x\n",
  193. rc);
  194. fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
  195. }
  196. }
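/* Illustrative usage sketch (it mirrors the call pattern later in this file):
 * the helper is invoked right after a ccw_device request fails, e.g.
 *
 *	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long)ch, 0xff, 0);
 *	if (rc != 0)
 *		ctcm_ccw_check_rc(ch, rc, "normal RX");
 *
 * It only logs the failure and feeds the matching CTC_EVENT_IO_* event into
 * the channel FSM; recovery is left to the state machine actions.
 */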
  197. void ctcm_purge_skb_queue(struct sk_buff_head *q)
  198. {
  199. struct sk_buff *skb;
  200. CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
  201. while ((skb = skb_dequeue(q))) {
  202. atomic_dec(&skb->users);
  203. dev_kfree_skb_any(skb);
  204. }
  205. }
  206. /**
  207. * NOP action for statemachines
  208. */
  209. static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
  210. {
  211. }
  212. /*
  213. * Actions for channel - statemachines.
  214. */
  215. /**
  216. * Normal data has been sent. Free the corresponding
  217. * skb (it's in io_queue), reset dev->tbusy and
  218. * revert to idle state.
  219. *
  220. * fi An instance of a channel statemachine.
  221. * event The event, just happened.
  222. * arg Generic pointer, casted from channel * upon call.
  223. */
  224. static void chx_txdone(fsm_instance *fi, int event, void *arg)
  225. {
  226. struct channel *ch = arg;
  227. struct net_device *dev = ch->netdev;
  228. struct ctcm_priv *priv = dev->ml_priv;
  229. struct sk_buff *skb;
  230. int first = 1;
  231. int i;
  232. unsigned long duration;
  233. struct timespec done_stamp = current_kernel_time(); /* xtime */
  234. CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
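/* Elapsed time since the write was started, in microseconds; ch->prof.tx_time
 * keeps the largest value seen, i.e. the worst-case TX completion time. */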
  235. duration =
  236. (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
  237. (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
  238. if (duration > ch->prof.tx_time)
  239. ch->prof.tx_time = duration;
  240. if (ch->irb->scsw.cmd.count != 0)
  241. CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
  242. "%s(%s): TX not complete, remaining %d bytes",
  243. CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
  244. fsm_deltimer(&ch->timer);
  245. while ((skb = skb_dequeue(&ch->io_queue))) {
  246. priv->stats.tx_packets++;
  247. priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
  248. if (first) {
  249. priv->stats.tx_bytes += 2;
  250. first = 0;
  251. }
  252. atomic_dec(&skb->users);
  253. dev_kfree_skb_irq(skb);
  254. }
  255. spin_lock(&ch->collect_lock);
  256. clear_normalized_cda(&ch->ccw[4]);
  257. if (ch->collect_len > 0) {
  258. int rc;
  259. if (ctcm_checkalloc_buffer(ch)) {
  260. spin_unlock(&ch->collect_lock);
  261. return;
  262. }
  263. ch->trans_skb->data = ch->trans_skb_data;
  264. skb_reset_tail_pointer(ch->trans_skb);
  265. ch->trans_skb->len = 0;
  266. if (ch->prof.maxmulti < (ch->collect_len + 2))
  267. ch->prof.maxmulti = ch->collect_len + 2;
  268. if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
  269. ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
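/* On-wire framing of the plain CTC protocol: a 16-bit block-length field that
 * counts itself (payload + 2 bytes), prepended to the chained data below. */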
  270. *((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
  271. i = 0;
  272. while ((skb = skb_dequeue(&ch->collect_queue))) {
  273. skb_copy_from_linear_data(skb,
  274. skb_put(ch->trans_skb, skb->len), skb->len);
  275. priv->stats.tx_packets++;
  276. priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
  277. atomic_dec(&skb->users);
  278. dev_kfree_skb_irq(skb);
  279. i++;
  280. }
  281. ch->collect_len = 0;
  282. spin_unlock(&ch->collect_lock);
  283. ch->ccw[1].count = ch->trans_skb->len;
  284. fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
  285. ch->prof.send_stamp = current_kernel_time(); /* xtime */
  286. rc = ccw_device_start(ch->cdev, &ch->ccw[0],
  287. (unsigned long)ch, 0xff, 0);
  288. ch->prof.doios_multi++;
  289. if (rc != 0) {
  290. priv->stats.tx_dropped += i;
  291. priv->stats.tx_errors += i;
  292. fsm_deltimer(&ch->timer);
  293. ctcm_ccw_check_rc(ch, rc, "chained TX");
  294. }
  295. } else {
  296. spin_unlock(&ch->collect_lock);
  297. fsm_newstate(fi, CTC_STATE_TXIDLE);
  298. }
  299. ctcm_clear_busy_do(dev);
  300. }
  301. /**
  302. * Initial data is sent.
  303. * Notify device statemachine that we are up and
  304. * running.
  305. *
  306. * fi An instance of a channel statemachine.
  307. * event The event, just happened.
  308. * arg Generic pointer, casted from channel * upon call.
  309. */
  310. void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
  311. {
  312. struct channel *ch = arg;
  313. struct net_device *dev = ch->netdev;
  314. struct ctcm_priv *priv = dev->ml_priv;
  315. CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
  316. fsm_deltimer(&ch->timer);
  317. fsm_newstate(fi, CTC_STATE_TXIDLE);
  318. fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
  319. }
  320. /**
  321. * Got normal data, check for sanity, queue it up, allocate a new buffer,
  322. * trigger the bottom half, and initiate the next read.
  323. *
  324. * fi An instance of a channel statemachine.
  325. * event The event, just happened.
  326. * arg Generic pointer, casted from channel * upon call.
  327. */
  328. static void chx_rx(fsm_instance *fi, int event, void *arg)
  329. {
  330. struct channel *ch = arg;
  331. struct net_device *dev = ch->netdev;
  332. struct ctcm_priv *priv = dev->ml_priv;
  333. int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
  334. struct sk_buff *skb = ch->trans_skb;
  335. __u16 block_len = *((__u16 *)skb->data);
  336. int check_len;
  337. int rc;
  338. fsm_deltimer(&ch->timer);
  339. if (len < 8) {
  340. CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
  341. "%s(%s): got packet with length %d < 8\n",
  342. CTCM_FUNTAIL, dev->name, len);
  343. priv->stats.rx_dropped++;
  344. priv->stats.rx_length_errors++;
  345. goto again;
  346. }
  347. if (len > ch->max_bufsize) {
  348. CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
  349. "%s(%s): got packet with length %d > %d\n",
  350. CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
  351. priv->stats.rx_dropped++;
  352. priv->stats.rx_length_errors++;
  353. goto again;
  354. }
  355. /*
  356. * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
  357. */
  358. switch (ch->protocol) {
  359. case CTCM_PROTO_S390:
  360. case CTCM_PROTO_OS390:
  361. check_len = block_len + 2;
  362. break;
  363. default:
  364. check_len = block_len;
  365. break;
  366. }
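/* Example (illustrative): with CTCM_PROTO_S390 and block_len = 26, any
 * received length between 26 and 28 bytes passes the check below; the up to
 * two extra bytes are the trailing garbage mentioned above. */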
  367. if ((len < block_len) || (len > check_len)) {
  368. CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
  369. "%s(%s): got block length %d != rx length %d\n",
  370. CTCM_FUNTAIL, dev->name, block_len, len);
  371. if (do_debug)
  372. ctcmpc_dump_skb(skb, 0);
  373. *((__u16 *)skb->data) = len;
  374. priv->stats.rx_dropped++;
  375. priv->stats.rx_length_errors++;
  376. goto again;
  377. }
  378. if (block_len > 2) {
  379. *((__u16 *)skb->data) = block_len - 2;
  380. ctcm_unpack_skb(ch, skb);
  381. }
  382. again:
  383. skb->data = ch->trans_skb_data;
  384. skb_reset_tail_pointer(skb);
  385. skb->len = 0;
  386. if (ctcm_checkalloc_buffer(ch))
  387. return;
  388. ch->ccw[1].count = ch->max_bufsize;
  389. rc = ccw_device_start(ch->cdev, &ch->ccw[0],
  390. (unsigned long)ch, 0xff, 0);
  391. if (rc != 0)
  392. ctcm_ccw_check_rc(ch, rc, "normal RX");
  393. }
  394. /**
  395. * Initialize connection by sending a __u16 of value 0.
  396. *
  397. * fi An instance of a channel statemachine.
  398. * event The event, just happened.
  399. * arg Generic pointer, casted from channel * upon call.
  400. */
  401. static void chx_firstio(fsm_instance *fi, int event, void *arg)
  402. {
  403. int rc;
  404. struct channel *ch = arg;
  405. int fsmstate = fsm_getstate(fi);
  406. CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
  407. "%s(%s) : %02x",
  408. CTCM_FUNTAIL, ch->id, fsmstate);
  409. ch->sense_rc = 0; /* reset unit check report control */
  410. if (fsmstate == CTC_STATE_TXIDLE)
  411. CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
  412. "%s(%s): remote side issued READ?, init.\n",
  413. CTCM_FUNTAIL, ch->id);
  414. fsm_deltimer(&ch->timer);
  415. if (ctcm_checkalloc_buffer(ch))
  416. return;
  417. if ((fsmstate == CTC_STATE_SETUPWAIT) &&
  418. (ch->protocol == CTCM_PROTO_OS390)) {
  419. /* OS/390 resp. z/OS */
  420. if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
  421. *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
  422. fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
  423. CTC_EVENT_TIMER, ch);
  424. chx_rxidle(fi, event, arg);
  425. } else {
  426. struct net_device *dev = ch->netdev;
  427. struct ctcm_priv *priv = dev->ml_priv;
  428. fsm_newstate(fi, CTC_STATE_TXIDLE);
  429. fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
  430. }
  431. return;
  432. }
  433. /*
  434. * Don't setup a timer for receiving the initial RX frame
  435. * if in compatibility mode, since VM TCP delays the initial
  436. * frame until it has some data to send.
  437. */
  438. if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
  439. (ch->protocol != CTCM_PROTO_S390))
  440. fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
  441. *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
  442. ch->ccw[1].count = 2; /* Transfer only length */
  443. fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
  444. ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
  445. rc = ccw_device_start(ch->cdev, &ch->ccw[0],
  446. (unsigned long)ch, 0xff, 0);
  447. if (rc != 0) {
  448. fsm_deltimer(&ch->timer);
  449. fsm_newstate(fi, CTC_STATE_SETUPWAIT);
  450. ctcm_ccw_check_rc(ch, rc, "init IO");
  451. }
  452. /*
  453. * If in compatibility mode since we don't setup a timer, we
  454. * also signal RX channel up immediately. This enables us
  455. * to send packets early which in turn usually triggers some
  456. * reply from VM TCP which brings up the RX channel to its
  457. * final state.
  458. */
  459. if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
  460. (ch->protocol == CTCM_PROTO_S390)) {
  461. struct net_device *dev = ch->netdev;
  462. struct ctcm_priv *priv = dev->ml_priv;
  463. fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
  464. }
  465. }
  466. /**
  467. * Got initial data, check it. If OK,
  468. * notify device statemachine that we are up and
  469. * running.
  470. *
  471. * fi An instance of a channel statemachine.
  472. * event The event, just happened.
  473. * arg Generic pointer, casted from channel * upon call.
  474. */
  475. static void chx_rxidle(fsm_instance *fi, int event, void *arg)
  476. {
  477. struct channel *ch = arg;
  478. struct net_device *dev = ch->netdev;
  479. struct ctcm_priv *priv = dev->ml_priv;
  480. __u16 buflen;
  481. int rc;
  482. fsm_deltimer(&ch->timer);
  483. buflen = *((__u16 *)ch->trans_skb->data);
  484. CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
  485. __func__, dev->name, buflen);
  486. if (buflen >= CTCM_INITIAL_BLOCKLEN) {
  487. if (ctcm_checkalloc_buffer(ch))
  488. return;
  489. ch->ccw[1].count = ch->max_bufsize;
  490. fsm_newstate(fi, CTC_STATE_RXIDLE);
  491. rc = ccw_device_start(ch->cdev, &ch->ccw[0],
  492. (unsigned long)ch, 0xff, 0);
  493. if (rc != 0) {
  494. fsm_newstate(fi, CTC_STATE_RXINIT);
  495. ctcm_ccw_check_rc(ch, rc, "initial RX");
  496. } else
  497. fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
  498. } else {
  499. CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
  500. __func__, dev->name,
  501. buflen, CTCM_INITIAL_BLOCKLEN);
  502. chx_firstio(fi, event, arg);
  503. }
  504. }
  505. /**
  506. * Set channel into extended mode.
  507. *
  508. * fi An instance of a channel statemachine.
  509. * event The event, just happened.
  510. * arg Generic pointer, casted from channel * upon call.
  511. */
  512. static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
  513. {
  514. struct channel *ch = arg;
  515. int rc;
  516. unsigned long saveflags = 0;
  517. int timeout = CTCM_TIME_5_SEC;
  518. fsm_deltimer(&ch->timer);
  519. if (IS_MPC(ch)) {
  520. timeout = 1500;
  521. CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
  522. __func__, smp_processor_id(), ch, ch->id);
  523. }
  524. fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
  525. fsm_newstate(fi, CTC_STATE_SETUPWAIT);
  526. CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
  527. if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
  528. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  529. /* Such conditional locking is nondeterministic from a static point of
  530. * view => ignore sparse warnings here. */
  531. rc = ccw_device_start(ch->cdev, &ch->ccw[6],
  532. (unsigned long)ch, 0xff, 0);
  533. if (event == CTC_EVENT_TIMER) /* see above comments */
  534. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  535. if (rc != 0) {
  536. fsm_deltimer(&ch->timer);
  537. fsm_newstate(fi, CTC_STATE_STARTWAIT);
  538. ctcm_ccw_check_rc(ch, rc, "set Mode");
  539. } else
  540. ch->retry = 0;
  541. }
  542. /**
  543. * Setup channel.
  544. *
  545. * fi An instance of a channel statemachine.
  546. * event The event, just happened.
  547. * arg Generic pointer, casted from channel * upon call.
  548. */
  549. static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
  550. {
  551. struct channel *ch = arg;
  552. unsigned long saveflags;
  553. int rc;
  554. CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
  555. CTCM_FUNTAIL, ch->id,
  556. (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
  557. if (ch->trans_skb != NULL) {
  558. clear_normalized_cda(&ch->ccw[1]);
  559. dev_kfree_skb(ch->trans_skb);
  560. ch->trans_skb = NULL;
  561. }
  562. if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
  563. ch->ccw[1].cmd_code = CCW_CMD_READ;
  564. ch->ccw[1].flags = CCW_FLAG_SLI;
  565. ch->ccw[1].count = 0;
  566. } else {
  567. ch->ccw[1].cmd_code = CCW_CMD_WRITE;
  568. ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  569. ch->ccw[1].count = 0;
  570. }
  571. if (ctcm_checkalloc_buffer(ch)) {
  572. CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
  573. "%s(%s): %s trans_skb alloc delayed "
  574. "until first transfer",
  575. CTCM_FUNTAIL, ch->id,
  576. (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
  577. "RX" : "TX");
  578. }
  579. ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
  580. ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  581. ch->ccw[0].count = 0;
  582. ch->ccw[0].cda = 0;
  583. ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
  584. ch->ccw[2].flags = CCW_FLAG_SLI;
  585. ch->ccw[2].count = 0;
  586. ch->ccw[2].cda = 0;
  587. memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
  588. ch->ccw[4].cda = 0;
  589. ch->ccw[4].flags &= ~CCW_FLAG_IDA;
  590. fsm_newstate(fi, CTC_STATE_STARTWAIT);
  591. fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
  592. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  593. rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
  594. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  595. if (rc != 0) {
  596. if (rc != -EBUSY)
  597. fsm_deltimer(&ch->timer);
  598. ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
  599. }
  600. }
  601. /**
  602. * Shutdown a channel.
  603. *
  604. * fi An instance of a channel statemachine.
  605. * event The event, just happened.
  606. * arg Generic pointer, casted from channel * upon call.
  607. */
  608. static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
  609. {
  610. struct channel *ch = arg;
  611. unsigned long saveflags = 0;
  612. int rc;
  613. int oldstate;
  614. fsm_deltimer(&ch->timer);
  615. if (IS_MPC(ch))
  616. fsm_deltimer(&ch->sweep_timer);
  617. fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
  618. if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */
  619. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  620. /* Such conditional locking is nondeterministic from a static point of
  621. * view => ignore sparse warnings here. */
  622. oldstate = fsm_getstate(fi);
  623. fsm_newstate(fi, CTC_STATE_TERM);
  624. rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
  625. if (event == CTC_EVENT_STOP)
  626. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  627. /* see remark above about conditional locking */
  628. if (rc != 0 && rc != -EBUSY) {
  629. fsm_deltimer(&ch->timer);
  630. if (event != CTC_EVENT_STOP) {
  631. fsm_newstate(fi, oldstate);
  632. ctcm_ccw_check_rc(ch, rc, (char *)__func__);
  633. }
  634. }
  635. }
  636. /**
  637. * Cleanup helper for ctcm_chx_fail and ctcm_chx_stopped:
  638. * clean up the channel's queues and notify the interface statemachine.
  639. *
  640. * fi An instance of a channel statemachine.
  641. * state The next state (depending on caller).
  642. * ch The channel to operate on.
  643. */
  644. static void ctcm_chx_cleanup(fsm_instance *fi, int state,
  645. struct channel *ch)
  646. {
  647. struct net_device *dev = ch->netdev;
  648. struct ctcm_priv *priv = dev->ml_priv;
  649. CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
  650. "%s(%s): %s[%d]\n",
  651. CTCM_FUNTAIL, dev->name, ch->id, state);
  652. fsm_deltimer(&ch->timer);
  653. if (IS_MPC(ch))
  654. fsm_deltimer(&ch->sweep_timer);
  655. fsm_newstate(fi, state);
  656. if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
  657. clear_normalized_cda(&ch->ccw[1]);
  658. dev_kfree_skb_any(ch->trans_skb);
  659. ch->trans_skb = NULL;
  660. }
  661. ch->th_seg = 0x00;
  662. ch->th_seq_num = 0x00;
  663. if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
  664. skb_queue_purge(&ch->io_queue);
  665. fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
  666. } else {
  667. ctcm_purge_skb_queue(&ch->io_queue);
  668. if (IS_MPC(ch))
  669. ctcm_purge_skb_queue(&ch->sweep_queue);
  670. spin_lock(&ch->collect_lock);
  671. ctcm_purge_skb_queue(&ch->collect_queue);
  672. ch->collect_len = 0;
  673. spin_unlock(&ch->collect_lock);
  674. fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
  675. }
  676. }
  677. /**
  678. * A channel has successfully been halted.
  679. * Clean up its queue and notify the interface statemachine.
  680. *
  681. * fi An instance of a channel statemachine.
  682. * event The event, just happened.
  683. * arg Generic pointer, casted from channel * upon call.
  684. */
  685. static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
  686. {
  687. ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
  688. }
  689. /**
  690. * A stop command from device statemachine arrived and we are in
  691. * not operational mode. Set state to stopped.
  692. *
  693. * fi An instance of a channel statemachine.
  694. * event The event, just happened.
  695. * arg Generic pointer, casted from channel * upon call.
  696. */
  697. static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
  698. {
  699. fsm_newstate(fi, CTC_STATE_STOPPED);
  700. }
  701. /**
  702. * A machine check for no path, not operational status or gone device has
  703. * happened.
  704. * Cleanup queue and notify interface statemachine.
  705. *
  706. * fi An instance of a channel statemachine.
  707. * event The event, just happened.
  708. * arg Generic pointer, casted from channel * upon call.
  709. */
  710. static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
  711. {
  712. ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
  713. }
  714. /**
  715. * Handle error during setup of channel.
  716. *
  717. * fi An instance of a channel statemachine.
  718. * event The event, just happened.
  719. * arg Generic pointer, casted from channel * upon call.
  720. */
  721. static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
  722. {
  723. struct channel *ch = arg;
  724. struct net_device *dev = ch->netdev;
  725. struct ctcm_priv *priv = dev->ml_priv;
  726. /*
  727. * Special case: Got UC_RCRESET on setmode.
  728. * This means that remote side isn't setup. In this case
  729. * simply retry after some 10 secs...
  730. */
  731. if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
  732. ((event == CTC_EVENT_UC_RCRESET) ||
  733. (event == CTC_EVENT_UC_RSRESET))) {
  734. fsm_newstate(fi, CTC_STATE_STARTRETRY);
  735. fsm_deltimer(&ch->timer);
  736. fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
  737. if (!IS_MPC(ch) &&
  738. (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
  739. int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
  740. if (rc != 0)
  741. ctcm_ccw_check_rc(ch, rc,
  742. "HaltIO in chx_setuperr");
  743. }
  744. return;
  745. }
  746. CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
  747. "%s(%s) : %s error during %s channel setup state=%s\n",
  748. CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
  749. (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
  750. fsm_getstate_str(fi));
  751. if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
  752. fsm_newstate(fi, CTC_STATE_RXERR);
  753. fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
  754. } else {
  755. fsm_newstate(fi, CTC_STATE_TXERR);
  756. fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
  757. }
  758. }
  759. /**
  760. * Restart a channel after an error.
  761. *
  762. * fi An instance of a channel statemachine.
  763. * event The event, just happened.
  764. * arg Generic pointer, casted from channel * upon call.
  765. */
  766. static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
  767. {
  768. struct channel *ch = arg;
  769. struct net_device *dev = ch->netdev;
  770. unsigned long saveflags = 0;
  771. int oldstate;
  772. int rc;
  773. CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
  774. "%s: %s[%d] of %s\n",
  775. CTCM_FUNTAIL, ch->id, event, dev->name);
  776. fsm_deltimer(&ch->timer);
  777. fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
  778. oldstate = fsm_getstate(fi);
  779. fsm_newstate(fi, CTC_STATE_STARTWAIT);
  780. if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
  781. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  782. /* Such conditional locking is a known problem for sparse
  783. * because it is nondeterministic in a static view.
  784. * Warnings should be ignored here. */
  785. rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
  786. if (event == CTC_EVENT_TIMER)
  787. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  788. if (rc != 0) {
  789. if (rc != -EBUSY) {
  790. fsm_deltimer(&ch->timer);
  791. fsm_newstate(fi, oldstate);
  792. }
  793. ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
  794. }
  795. }
  796. /**
  797. * Handle error during RX initial handshake (exchange of
  798. * 0-length block header)
  799. *
  800. * fi An instance of a channel statemachine.
  801. * event The event, just happened.
  802. * arg Generic pointer, casted from channel * upon call.
  803. */
  804. static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
  805. {
  806. struct channel *ch = arg;
  807. struct net_device *dev = ch->netdev;
  808. struct ctcm_priv *priv = dev->ml_priv;
  809. if (event == CTC_EVENT_TIMER) {
  810. if (!IS_MPCDEV(dev))
  811. /* TODO : check if MPC deletes timer somewhere */
  812. fsm_deltimer(&ch->timer);
  813. if (ch->retry++ < 3)
  814. ctcm_chx_restart(fi, event, arg);
  815. else {
  816. fsm_newstate(fi, CTC_STATE_RXERR);
  817. fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
  818. }
  819. } else {
  820. CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
  821. "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
  822. ctc_ch_event_names[event], fsm_getstate_str(fi));
  823. dev_warn(&dev->dev,
  824. "Initialization failed with RX/TX init handshake "
  825. "error %s\n", ctc_ch_event_names[event]);
  826. }
  827. }
  828. /**
  829. * Notify device statemachine if we gave up initialization
  830. * of RX channel.
  831. *
  832. * fi An instance of a channel statemachine.
  833. * event The event, just happened.
  834. * arg Generic pointer, casted from channel * upon call.
  835. */
  836. static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
  837. {
  838. struct channel *ch = arg;
  839. struct net_device *dev = ch->netdev;
  840. struct ctcm_priv *priv = dev->ml_priv;
  841. CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
  842. "%s(%s): RX %s busy, init. fail",
  843. CTCM_FUNTAIL, dev->name, ch->id);
  844. fsm_newstate(fi, CTC_STATE_RXERR);
  845. fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
  846. }
  847. /**
  848. * Handle RX Unit check remote reset (remote disconnected)
  849. *
  850. * fi An instance of a channel statemachine.
  851. * event The event, just happened.
  852. * arg Generic pointer, casted from channel * upon call.
  853. */
  854. static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
  855. {
  856. struct channel *ch = arg;
  857. struct channel *ch2;
  858. struct net_device *dev = ch->netdev;
  859. struct ctcm_priv *priv = dev->ml_priv;
  860. CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
  861. "%s: %s: remote disconnect - re-init ...",
  862. CTCM_FUNTAIL, dev->name);
  863. fsm_deltimer(&ch->timer);
  864. /*
  865. * Notify device statemachine
  866. */
  867. fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
  868. fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
  869. fsm_newstate(fi, CTC_STATE_DTERM);
  870. ch2 = priv->channel[CTCM_WRITE];
  871. fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
  872. ccw_device_halt(ch->cdev, (unsigned long)ch);
  873. ccw_device_halt(ch2->cdev, (unsigned long)ch2);
  874. }
  875. /**
  876. * Handle error during TX channel initialization.
  877. *
  878. * fi An instance of a channel statemachine.
  879. * event The event, just happened.
  880. * arg Generic pointer, casted from channel * upon call.
  881. */
  882. static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
  883. {
  884. struct channel *ch = arg;
  885. struct net_device *dev = ch->netdev;
  886. struct ctcm_priv *priv = dev->ml_priv;
  887. if (event == CTC_EVENT_TIMER) {
  888. fsm_deltimer(&ch->timer);
  889. if (ch->retry++ < 3)
  890. ctcm_chx_restart(fi, event, arg);
  891. else {
  892. fsm_newstate(fi, CTC_STATE_TXERR);
  893. fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
  894. }
  895. } else {
  896. CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
  897. "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
  898. ctc_ch_event_names[event], fsm_getstate_str(fi));
  899. dev_warn(&dev->dev,
  900. "Initialization failed with RX/TX init handshake "
  901. "error %s\n", ctc_ch_event_names[event]);
  902. }
  903. }
  904. /**
  905. * Handle TX timeout by retrying operation.
  906. *
  907. * fi An instance of a channel statemachine.
  908. * event The event, just happened.
  909. * arg Generic pointer, casted from channel * upon call.
  910. */
  911. static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
  912. {
  913. struct channel *ch = arg;
  914. struct net_device *dev = ch->netdev;
  915. struct ctcm_priv *priv = dev->ml_priv;
  916. struct sk_buff *skb;
  917. CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
  918. __func__, smp_processor_id(), ch, ch->id);
  919. fsm_deltimer(&ch->timer);
  920. if (ch->retry++ > 3) {
  921. struct mpc_group *gptr = priv->mpcg;
  922. CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
  923. "%s: %s: retries exceeded",
  924. CTCM_FUNTAIL, ch->id);
  925. fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
  926. /* call restart if not MPC or if MPC and mpcg fsm is ready.
  927. use gptr as mpc indicator */
  928. if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
  929. ctcm_chx_restart(fi, event, arg);
  930. goto done;
  931. }
  932. CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
  933. "%s : %s: retry %d",
  934. CTCM_FUNTAIL, ch->id, ch->retry);
  935. skb = skb_peek(&ch->io_queue);
  936. if (skb) {
  937. int rc = 0;
  938. unsigned long saveflags = 0;
  939. clear_normalized_cda(&ch->ccw[4]);
  940. ch->ccw[4].count = skb->len;
  941. if (set_normalized_cda(&ch->ccw[4], skb->data)) {
  942. CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
  943. "%s: %s: IDAL alloc failed",
  944. CTCM_FUNTAIL, ch->id);
  945. fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
  946. ctcm_chx_restart(fi, event, arg);
  947. goto done;
  948. }
  949. fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
  950. if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
  951. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  952. /* Such conditional locking is a known problem for sparse
  953. * because it is nondeterministic in a static view.
  954. * Warnings should be ignored here. */
  955. if (do_debug_ccw)
  956. ctcmpc_dumpit((char *)&ch->ccw[3],
  957. sizeof(struct ccw1) * 3);
  958. rc = ccw_device_start(ch->cdev, &ch->ccw[3],
  959. (unsigned long)ch, 0xff, 0);
  960. if (event == CTC_EVENT_TIMER)
  961. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
  962. saveflags);
  963. if (rc != 0) {
  964. fsm_deltimer(&ch->timer);
  965. ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
  966. ctcm_purge_skb_queue(&ch->io_queue);
  967. }
  968. }
  969. done:
  970. return;
  971. }
  972. /**
  973. * Handle fatal errors during an I/O command.
  974. *
  975. * fi An instance of a channel statemachine.
  976. * event The event, just happened.
  977. * arg Generic pointer, casted from channel * upon call.
  978. */
  979. static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
  980. {
  981. struct channel *ch = arg;
  982. struct net_device *dev = ch->netdev;
  983. struct ctcm_priv *priv = dev->ml_priv;
  984. int rd = CHANNEL_DIRECTION(ch->flags);
  985. fsm_deltimer(&ch->timer);
  986. CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
  987. "%s: %s: %s unrecoverable channel error",
  988. CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
  989. if (IS_MPC(ch)) {
  990. priv->stats.tx_dropped++;
  991. priv->stats.tx_errors++;
  992. }
  993. if (rd == CTCM_READ) {
  994. fsm_newstate(fi, CTC_STATE_RXERR);
  995. fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
  996. } else {
  997. fsm_newstate(fi, CTC_STATE_TXERR);
  998. fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
  999. }
  1000. }
  1001. /*
  1002. * The ctcm statemachine for a channel.
  1003. */
  1004. const fsm_node ch_fsm[] = {
  1005. { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
  1006. { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
  1007. { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
  1008. { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
  1009. { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
  1010. { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
  1011. { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
  1012. { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
  1013. { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
  1014. { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
  1015. { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
  1016. { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
  1017. { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
  1018. { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1019. { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1020. { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
  1021. { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
  1022. { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
  1023. { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1024. { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
  1025. { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
  1026. { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
  1027. { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
  1028. { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
  1029. { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
  1030. { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1031. { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1032. { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
  1033. { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
  1034. { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
  1035. { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
  1036. { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
  1037. { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
  1038. { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
  1039. { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1040. { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
  1041. { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1042. { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
  1043. { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
  1044. { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
  1045. { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
  1046. { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1047. { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1048. { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },
  1049. { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
  1050. { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
  1051. { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
  1052. { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
  1053. { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
  1054. { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
  1055. { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1056. { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1057. { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
  1058. { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
  1059. { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
  1060. { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
  1061. { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
  1062. { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1063. { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1064. { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
  1065. { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
  1066. { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
  1067. { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
  1068. { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
  1069. { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1070. { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
  1071. { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
  1072. { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
  1073. { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
  1074. { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
  1075. { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1076. { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
  1077. { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
  1078. { CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
  1079. { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
  1080. { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
  1081. { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
  1082. { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1083. { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1084. { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
  1085. { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
  1086. { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1087. { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1088. };
  1089. int ch_fsm_len = ARRAY_SIZE(ch_fsm);
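/* A sketch of how this table is consumed (an assumption based on the generic
 * FSM helpers declared in fsm.h; the actual call sites live in ctcm_main.c):
 *
 *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
 *			   nr_states, nr_events, ch_fsm, ch_fsm_len, GFP_KERNEL);
 *
 * where nr_states/nr_events stand for the driver's state and event counts.
 * fsm_event() then looks up the (current state, event) pair in the table and
 * runs the registered action; the ctcm_action_nop entries deliberately swallow
 * events that need no handling in a given state.
 */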
  1090. /*
  1091. * MPC actions for the mpc channel statemachine.
  1092. * Handling of the MPC protocol requires an extra
  1093. * statemachine and actions, which are prefixed ctcmpc_.
  1094. * The ctc_ch_states and ctc_ch_state_names,
  1095. * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
  1096. * extended by a few MPC-specific elements.
  1097. */
  1098. /*
  1099. * Actions for mpc channel statemachine.
  1100. */
  1101. /**
  1102. * Normal data has been sent. Free the corresponding
  1103. * skb (it's in io_queue), reset dev->tbusy and
  1104. * revert to idle state.
  1105. *
  1106. * fi An instance of a channel statemachine.
  1107. * event The event, just happened.
  1108. * arg Generic pointer, casted from channel * upon call.
  1109. */
  1110. static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
  1111. {
  1112. struct channel *ch = arg;
  1113. struct net_device *dev = ch->netdev;
  1114. struct ctcm_priv *priv = dev->ml_priv;
  1115. struct mpc_group *grp = priv->mpcg;
  1116. struct sk_buff *skb;
  1117. int first = 1;
  1118. int i;
  1119. __u32 data_space;
  1120. unsigned long duration;
  1121. struct sk_buff *peekskb;
  1122. int rc;
  1123. struct th_header *header;
  1124. struct pdu *p_header;
  1125. struct timespec done_stamp = current_kernel_time(); /* xtime */
  1126. CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
  1127. __func__, dev->name, smp_processor_id());
  1128. duration =
  1129. (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
  1130. (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
  1131. if (duration > ch->prof.tx_time)
  1132. ch->prof.tx_time = duration;
  1133. if (ch->irb->scsw.cmd.count != 0)
  1134. CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
  1135. "%s(%s): TX not complete, remaining %d bytes",
  1136. CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
  1137. fsm_deltimer(&ch->timer);
  1138. while ((skb = skb_dequeue(&ch->io_queue))) {
  1139. priv->stats.tx_packets++;
  1140. priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
  1141. if (first) {
  1142. priv->stats.tx_bytes += 2;
  1143. first = 0;
  1144. }
  1145. atomic_dec(&skb->users);
  1146. dev_kfree_skb_irq(skb);
  1147. }
  1148. spin_lock(&ch->collect_lock);
  1149. clear_normalized_cda(&ch->ccw[4]);
  1150. if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
  1151. spin_unlock(&ch->collect_lock);
  1152. fsm_newstate(fi, CTC_STATE_TXIDLE);
  1153. goto done;
  1154. }
  1155. if (ctcm_checkalloc_buffer(ch)) {
  1156. spin_unlock(&ch->collect_lock);
  1157. goto done;
  1158. }
  1159. ch->trans_skb->data = ch->trans_skb_data;
  1160. skb_reset_tail_pointer(ch->trans_skb);
  1161. ch->trans_skb->len = 0;
  1162. if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
  1163. ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
  1164. if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
  1165. ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
  1166. i = 0;
  1167. p_header = NULL;
  1168. data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
  1169. CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
  1170. " data_space:%04x\n",
  1171. __func__, data_space);
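/* MPC framing built below: every skb taken from the collect queue becomes a
 * PDU (flag 0x60 for SNAP traffic, 0x20 otherwise, PDU_LAST on the final one);
 * a single TH header carrying the next send sequence number is then pushed in
 * front of the assembled trans_skb before it is written to the channel. */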
  1172. while ((skb = skb_dequeue(&ch->collect_queue))) {
  1173. memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
  1174. p_header = (struct pdu *)
  1175. (skb_tail_pointer(ch->trans_skb) - skb->len);
  1176. p_header->pdu_flag = 0x00;
  1177. if (skb->protocol == ntohs(ETH_P_SNAP))
  1178. p_header->pdu_flag |= 0x60;
  1179. else
  1180. p_header->pdu_flag |= 0x20;
  1181. CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
  1182. __func__, ch->trans_skb->len);
  1183. CTCM_PR_DBGDATA("%s: pdu header and data for up"
  1184. " to 32 bytes sent to vtam\n", __func__);
  1185. CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
  1186. ch->collect_len -= skb->len;
  1187. data_space -= skb->len;
  1188. priv->stats.tx_packets++;
  1189. priv->stats.tx_bytes += skb->len;
  1190. atomic_dec(&skb->users);
  1191. dev_kfree_skb_any(skb);
  1192. peekskb = skb_peek(&ch->collect_queue); /* NULL once drained */
  1193. if (!peekskb || peekskb->len > data_space)
  1194. break;
  1195. i++;
  1196. }
  1197. /* p_header points to the last one we handled */
  1198. if (p_header)
  1199. p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/
  1200. header = kzalloc(TH_HEADER_LENGTH, gfp_type());
  1201. if (!header) {
  1202. spin_unlock(&ch->collect_lock);
  1203. fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
  1204. goto done;
  1205. }
  1206. header->th_ch_flag = TH_HAS_PDU; /* Normal data */
  1207. ch->th_seq_num++;
  1208. header->th_seq_num = ch->th_seq_num;
  1209. CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
  1210. __func__, ch->th_seq_num);
  1211. memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
  1212. TH_HEADER_LENGTH); /* put the TH on the packet */
  1213. kfree(header);
  1214. CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
  1215. __func__, ch->trans_skb->len);
  1216. CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
  1217. "data to vtam from collect_q\n", __func__);
  1218. CTCM_D3_DUMP((char *)ch->trans_skb->data,
  1219. min_t(int, ch->trans_skb->len, 50));
  1220. spin_unlock(&ch->collect_lock);
  1221. clear_normalized_cda(&ch->ccw[1]);
  1222. if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
  1223. dev_kfree_skb_any(ch->trans_skb);
  1224. ch->trans_skb = NULL;
  1225. CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
  1226. "%s: %s: IDAL alloc failed",
  1227. CTCM_FUNTAIL, ch->id);
  1228. fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
  1229. return;
  1230. }
  1231. ch->ccw[1].count = ch->trans_skb->len;
  1232. fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
  1233. ch->prof.send_stamp = current_kernel_time(); /* xtime */
  1234. if (do_debug_ccw)
  1235. ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
  1236. rc = ccw_device_start(ch->cdev, &ch->ccw[0],
  1237. (unsigned long)ch, 0xff, 0);
  1238. ch->prof.doios_multi++;
  1239. if (rc != 0) {
  1240. priv->stats.tx_dropped += i;
  1241. priv->stats.tx_errors += i;
  1242. fsm_deltimer(&ch->timer);
  1243. ctcm_ccw_check_rc(ch, rc, "chained TX");
  1244. }
  1245. done:
  1246. ctcm_clear_busy(dev);
  1247. return;
  1248. }
  1249. /**
  1250. * Got normal data, check for sanity, queue it up, allocate a new buffer,
  1251. * trigger the bottom half, and initiate the next read.
  1252. *
  1253. * fi An instance of a channel statemachine.
  1254. * event The event, just happened.
  1255. * arg Generic pointer, casted from channel * upon call.
  1256. */
  1257. static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
  1258. {
  1259. struct channel *ch = arg;
  1260. struct net_device *dev = ch->netdev;
  1261. struct ctcm_priv *priv = dev->ml_priv;
  1262. struct mpc_group *grp = priv->mpcg;
  1263. struct sk_buff *skb = ch->trans_skb;
  1264. struct sk_buff *new_skb;
  1265. unsigned long saveflags = 0; /* avoids compiler warning */
  1266. int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
  1267. CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
  1268. CTCM_FUNTAIL, dev->name, smp_processor_id(),
  1269. ch->id, ch->max_bufsize, len);
  1270. fsm_deltimer(&ch->timer);
  1271. if (skb == NULL) {
  1272. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1273. "%s(%s): TRANS_SKB = NULL",
  1274. CTCM_FUNTAIL, dev->name);
  1275. goto again;
  1276. }
  1277. if (len < TH_HEADER_LENGTH) {
  1278. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1279. "%s(%s): packet length %d to short",
  1280. CTCM_FUNTAIL, dev->name, len);
  1281. priv->stats.rx_dropped++;
  1282. priv->stats.rx_length_errors++;
  1283. } else {
  1284. /* must have valid th header or game over */
  1285. __u32 block_len = len;
  1286. len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
  1287. new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
  1288. if (new_skb == NULL) {
  1289. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1290. "%s(%d): skb allocation failed",
  1291. CTCM_FUNTAIL, dev->name);
  1292. fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
  1293. goto again;
  1294. }
  1295. switch (fsm_getstate(grp->fsm)) {
  1296. case MPCG_STATE_RESET:
  1297. case MPCG_STATE_INOP:
  1298. dev_kfree_skb_any(new_skb);
  1299. break;
  1300. case MPCG_STATE_FLOWC:
  1301. case MPCG_STATE_READY:
  1302. memcpy(skb_put(new_skb, block_len),
  1303. skb->data, block_len);
  1304. skb_queue_tail(&ch->io_queue, new_skb);
  1305. tasklet_schedule(&ch->ch_tasklet);
  1306. break;
  1307. default:
  1308. memcpy(skb_put(new_skb, len), skb->data, len);
  1309. skb_queue_tail(&ch->io_queue, new_skb);
  1310. tasklet_hi_schedule(&ch->ch_tasklet);
  1311. break;
  1312. }
  1313. }
  1314. again:
  1315. switch (fsm_getstate(grp->fsm)) {
  1316. int rc, dolock;
  1317. case MPCG_STATE_FLOWC:
  1318. case MPCG_STATE_READY:
  1319. if (ctcm_checkalloc_buffer(ch))
  1320. break;
  1321. ch->trans_skb->data = ch->trans_skb_data;
  1322. skb_reset_tail_pointer(ch->trans_skb);
  1323. ch->trans_skb->len = 0;
  1324. ch->ccw[1].count = ch->max_bufsize;
  1325. if (do_debug_ccw)
  1326. ctcmpc_dumpit((char *)&ch->ccw[0],
  1327. sizeof(struct ccw1) * 3);
  1328. dolock = !in_irq();
  1329. if (dolock)
  1330. spin_lock_irqsave(
  1331. get_ccwdev_lock(ch->cdev), saveflags);
  1332. rc = ccw_device_start(ch->cdev, &ch->ccw[0],
  1333. (unsigned long)ch, 0xff, 0);
  1334. if (dolock) /* see remark about conditional locking */
  1335. spin_unlock_irqrestore(
  1336. get_ccwdev_lock(ch->cdev), saveflags);
  1337. if (rc != 0)
  1338. ctcm_ccw_check_rc(ch, rc, "normal RX");
  1339. default:
  1340. break;
  1341. }
  1342. CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
  1343. __func__, dev->name, ch, ch->id);
  1344. }
  1345. /**
  1346. * Initialize connection by sending a __u16 of value 0.
  1347. *
  1348. * fi An instance of a channel statemachine.
  1349. * event The event, just happened.
  1350. * arg Generic pointer, casted from channel * upon call.
  1351. */
  1352. static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
  1353. {
  1354. struct channel *ch = arg;
  1355. struct net_device *dev = ch->netdev;
  1356. struct ctcm_priv *priv = dev->ml_priv;
  1357. struct mpc_group *gptr = priv->mpcg;
  1358. CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
  1359. __func__, ch->id, ch);
  1360. CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
  1361. "%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
  1362. CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
  1363. fsm_getstate(gptr->fsm), ch->protocol);
  1364. if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
  1365. MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
  1366. fsm_deltimer(&ch->timer);
  1367. if (ctcm_checkalloc_buffer(ch))
  1368. goto done;
  1369. switch (fsm_getstate(fi)) {
  1370. case CTC_STATE_STARTRETRY:
  1371. case CTC_STATE_SETUPWAIT:
  1372. if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
  1373. ctcmpc_chx_rxidle(fi, event, arg);
  1374. } else {
  1375. fsm_newstate(fi, CTC_STATE_TXIDLE);
  1376. fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
  1377. }
  1378. goto done;
  1379. default:
  1380. break;
  1381. };
  1382. fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
  1383. ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
  1384. done:
  1385. CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
  1386. __func__, ch->id, ch);
  1387. return;
  1388. }
  1389. /**
  1390. * Got initial data, check it. If OK,
  1391. * notify device statemachine that we are up and
  1392. * running.
  1393. *
  1394. * fi An instance of a channel statemachine.
  1395. * event The event, just happened.
  1396. * arg Generic pointer, casted from channel * upon call.
  1397. */
  1398. void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
  1399. {
  1400. struct channel *ch = arg;
  1401. struct net_device *dev = ch->netdev;
  1402. struct ctcm_priv *priv = dev->ml_priv;
  1403. struct mpc_group *grp = priv->mpcg;
  1404. int rc;
  1405. unsigned long saveflags = 0; /* avoids compiler warning */
  1406. fsm_deltimer(&ch->timer);
  1407. CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
  1408. __func__, ch->id, dev->name, smp_processor_id(),
  1409. fsm_getstate(fi), fsm_getstate(grp->fsm));
  1410. fsm_newstate(fi, CTC_STATE_RXIDLE);
  1411. /* XID processing complete */
  1412. switch (fsm_getstate(grp->fsm)) {
  1413. case MPCG_STATE_FLOWC:
  1414. case MPCG_STATE_READY:
  1415. if (ctcm_checkalloc_buffer(ch))
  1416. goto done;
  1417. ch->trans_skb->data = ch->trans_skb_data;
  1418. skb_reset_tail_pointer(ch->trans_skb);
  1419. ch->trans_skb->len = 0;
  1420. ch->ccw[1].count = ch->max_bufsize;
  1421. CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
  1422. if (event == CTC_EVENT_START)
  1423. /* see remark about conditional locking */
  1424. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  1425. rc = ccw_device_start(ch->cdev, &ch->ccw[0],
  1426. (unsigned long)ch, 0xff, 0);
  1427. if (event == CTC_EVENT_START)
  1428. spin_unlock_irqrestore(
  1429. get_ccwdev_lock(ch->cdev), saveflags);
  1430. if (rc != 0) {
  1431. fsm_newstate(fi, CTC_STATE_RXINIT);
  1432. ctcm_ccw_check_rc(ch, rc, "initial RX");
  1433. goto done;
  1434. }
  1435. break;
  1436. default:
  1437. break;
  1438. }
  1439. fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
  1440. done:
  1441. return;
  1442. }
  1443. /*
  1444. * ctcmpc channel FSM action
  1445. * called from several points in ctcmpc_ch_fsm
  1446. * ctcmpc only
  1447. */
  1448. static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
  1449. {
  1450. struct channel *ch = arg;
  1451. struct net_device *dev = ch->netdev;
  1452. struct ctcm_priv *priv = dev->ml_priv;
  1453. struct mpc_group *grp = priv->mpcg;
  1454. CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
  1455. __func__, dev->name, ch->id, ch, smp_processor_id(),
  1456. fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
  1457. switch (fsm_getstate(grp->fsm)) {
  1458. case MPCG_STATE_XID2INITW:
  1459. /* ok..start yside xid exchanges */
  1460. if (!ch->in_mpcgroup)
  1461. break;
  1462. if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
  1463. fsm_deltimer(&grp->timer);
  1464. fsm_addtimer(&grp->timer,
  1465. MPC_XID_TIMEOUT_VALUE,
  1466. MPCG_EVENT_TIMER, dev);
  1467. fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
  1468. } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
  1469. /* attn rcvd before xid0 processed via bh */
  1470. fsm_newstate(ch->fsm, CH_XID7_PENDING1);
  1471. break;
  1472. case MPCG_STATE_XID2INITX:
  1473. case MPCG_STATE_XID0IOWAIT:
  1474. case MPCG_STATE_XID0IOWAIX:
1475. /* attn received before xid0 was processed on this channel,
1476. but the group is in the middle of xid0 processing */
  1477. if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
  1478. fsm_newstate(ch->fsm, CH_XID7_PENDING1);
  1479. break;
  1480. case MPCG_STATE_XID7INITW:
  1481. case MPCG_STATE_XID7INITX:
  1482. case MPCG_STATE_XID7INITI:
  1483. case MPCG_STATE_XID7INITZ:
  1484. switch (fsm_getstate(ch->fsm)) {
  1485. case CH_XID7_PENDING:
  1486. fsm_newstate(ch->fsm, CH_XID7_PENDING1);
  1487. break;
  1488. case CH_XID7_PENDING2:
  1489. fsm_newstate(ch->fsm, CH_XID7_PENDING3);
  1490. break;
  1491. }
  1492. fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
  1493. break;
  1494. }
  1495. return;
  1496. }
  1497. /*
  1498. * ctcmpc channel FSM action
  1499. * called from one point in ctcmpc_ch_fsm
  1500. * ctcmpc only
  1501. */
  1502. static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
  1503. {
  1504. struct channel *ch = arg;
  1505. struct net_device *dev = ch->netdev;
  1506. struct ctcm_priv *priv = dev->ml_priv;
  1507. struct mpc_group *grp = priv->mpcg;
  1508. CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
  1509. __func__, dev->name, ch->id,
  1510. fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
  1511. fsm_deltimer(&ch->timer);
  1512. switch (fsm_getstate(grp->fsm)) {
  1513. case MPCG_STATE_XID0IOWAIT:
1514. /* vtam wants to be primary. start yside xid exchanges. */
1515. /* only one attn-busy is received at a time, so the state */
1516. /* must not be changed each time. */
  1517. grp->changed_side = 1;
  1518. fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
  1519. break;
  1520. case MPCG_STATE_XID2INITW:
  1521. if (grp->changed_side == 1) {
  1522. grp->changed_side = 2;
  1523. break;
  1524. }
  1525. /* process began via call to establish_conn */
  1526. /* so must report failure instead of reverting */
  1527. /* back to ready-for-xid passive state */
  1528. if (grp->estconnfunc)
  1529. goto done;
  1530. /* this attnbusy is NOT the result of xside xid */
  1531. /* collisions so yside must have been triggered */
  1532. /* by an ATTN that was not intended to start XID */
  1533. /* processing. Revert back to ready-for-xid and */
  1534. /* wait for ATTN interrupt to signal xid start */
  1535. if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
1536. fsm_newstate(ch->fsm, CH_XID0_PENDING);
  1537. fsm_deltimer(&grp->timer);
  1538. goto done;
  1539. }
  1540. fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
  1541. goto done;
  1542. case MPCG_STATE_XID2INITX:
1543. /* XID2 was received before ATTN Busy for the second
1544. channel. Send yside xid for the second channel.
1545. */
  1546. if (grp->changed_side == 1) {
  1547. grp->changed_side = 2;
  1548. break;
  1549. }
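/* changed_side != 1: fall through and treat this as out-of-sync below */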
  1550. case MPCG_STATE_XID0IOWAIX:
  1551. case MPCG_STATE_XID7INITW:
  1552. case MPCG_STATE_XID7INITX:
  1553. case MPCG_STATE_XID7INITI:
  1554. case MPCG_STATE_XID7INITZ:
  1555. default:
1556. /* multiple attn-busy interrupts indicate the sides are too far */
1557. /* out of sync and are certainly not being received as part of */
1558. /* valid mpc group negotiations */
  1559. fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
  1560. goto done;
  1561. }
  1562. if (grp->changed_side == 1) {
  1563. fsm_deltimer(&grp->timer);
  1564. fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
  1565. MPCG_EVENT_TIMER, dev);
  1566. }
  1567. if (ch->in_mpcgroup)
  1568. fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
  1569. else
  1570. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1571. "%s(%s): channel %s not added to group",
  1572. CTCM_FUNTAIL, dev->name, ch->id);
  1573. done:
  1574. return;
  1575. }
  1576. /*
  1577. * ctcmpc channel FSM action
  1578. * called from several points in ctcmpc_ch_fsm
  1579. * ctcmpc only
  1580. */
  1581. static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
  1582. {
  1583. struct channel *ch = arg;
  1584. struct net_device *dev = ch->netdev;
  1585. struct ctcm_priv *priv = dev->ml_priv;
  1586. struct mpc_group *grp = priv->mpcg;
  1587. fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
  1588. return;
  1589. }
  1590. /*
  1591. * ctcmpc channel FSM action
  1592. * called from several points in ctcmpc_ch_fsm
  1593. * ctcmpc only
  1594. */
  1595. static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
  1596. {
  1597. struct channel *ach = arg;
  1598. struct net_device *dev = ach->netdev;
  1599. struct ctcm_priv *priv = dev->ml_priv;
  1600. struct mpc_group *grp = priv->mpcg;
  1601. struct channel *wch = priv->channel[CTCM_WRITE];
  1602. struct channel *rch = priv->channel[CTCM_READ];
  1603. struct sk_buff *skb;
  1604. struct th_sweep *header;
  1605. int rc = 0;
  1606. unsigned long saveflags = 0;
  1607. CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
  1608. __func__, smp_processor_id(), ach, ach->id);
  1609. if (grp->in_sweep == 0)
  1610. goto done;
1611. CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n",
1612. __func__, wch->th_seq_num);
1613. CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n",
1614. __func__, rch->th_seq_num);
  1615. if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
  1616. /* give the previous IO time to complete */
  1617. fsm_addtimer(&wch->sweep_timer,
  1618. 200, CTC_EVENT_RSWEEP_TIMER, wch);
  1619. goto done;
  1620. }
  1621. skb = skb_dequeue(&wch->sweep_queue);
  1622. if (!skb)
  1623. goto done;
  1624. if (set_normalized_cda(&wch->ccw[4], skb->data)) {
  1625. grp->in_sweep = 0;
  1626. ctcm_clear_busy_do(dev);
  1627. dev_kfree_skb_any(skb);
  1628. fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
  1629. goto done;
  1630. } else {
  1631. atomic_inc(&skb->users);
  1632. skb_queue_tail(&wch->io_queue, skb);
  1633. }
  1634. /* send out the sweep */
  1635. wch->ccw[4].count = skb->len;
  1636. header = (struct th_sweep *)skb->data;
  1637. switch (header->th.th_ch_flag) {
  1638. case TH_SWEEP_REQ:
  1639. grp->sweep_req_pend_num--;
  1640. break;
  1641. case TH_SWEEP_RESP:
  1642. grp->sweep_rsp_pend_num--;
  1643. break;
  1644. }
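/* stamp the sweep frame with the current outbound TH sequence number */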
  1645. header->sw.th_last_seq = wch->th_seq_num;
  1646. CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
  1647. CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
  1648. CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
  1649. fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
  1650. fsm_newstate(wch->fsm, CTC_STATE_TX);
  1651. spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
  1652. wch->prof.send_stamp = current_kernel_time(); /* xtime */
  1653. rc = ccw_device_start(wch->cdev, &wch->ccw[3],
  1654. (unsigned long) wch, 0xff, 0);
  1655. spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
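/* no sweep requests or responses left pending: the sweep cycle is */
/* complete, so reset both TH sequence counters and clear the busy flag */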
  1656. if ((grp->sweep_req_pend_num == 0) &&
  1657. (grp->sweep_rsp_pend_num == 0)) {
  1658. grp->in_sweep = 0;
  1659. rch->th_seq_num = 0x00;
  1660. wch->th_seq_num = 0x00;
  1661. ctcm_clear_busy_do(dev);
  1662. }
1663. CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n",
1664. __func__, wch->th_seq_num, rch->th_seq_num);
  1665. if (rc != 0)
  1666. ctcm_ccw_check_rc(wch, rc, "send sweep");
  1667. done:
  1668. return;
  1669. }
  1670. /*
  1671. * The ctcmpc statemachine for a channel.
  1672. */
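/* Each entry maps a (state, event) pair to the action routine that the */
/* generic fsm code invokes; the table length is exported as mpc_ch_fsm_len. */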
  1673. const fsm_node ctcmpc_ch_fsm[] = {
  1674. { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
  1675. { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
  1676. { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1677. { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
  1678. { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
  1679. { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
  1680. { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
  1681. { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
  1682. { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
  1683. { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
  1684. { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop },
  1685. { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop },
  1686. { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1687. { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
  1688. { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
  1689. { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
  1690. { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
  1691. { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1692. { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1693. { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
  1694. { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
  1695. { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
  1696. { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1697. { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1698. { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
  1699. { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
  1700. { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
  1701. { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
  1702. { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
  1703. { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
  1704. { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1705. { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1706. { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
  1707. { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
  1708. { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle },
  1709. { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
  1710. { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
  1711. { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
  1712. { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
  1713. { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1714. { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio },
  1715. { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1716. { CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop },
  1717. { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
  1718. { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
  1719. { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop },
  1720. { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1721. { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1722. { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1723. { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
  1725. { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
  1726. { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
  1727. { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn },
  1728. { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio },
  1729. { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop },
  1730. { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1731. { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1732. { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
  1733. { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
  1734. { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy },
  1735. { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend },
  1736. { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
  1737. { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
  1738. { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
  1739. { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
  1740. { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop },
  1741. { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1742. { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1743. { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
  1744. { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1745. { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
  1747. { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
  1748. { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend },
  1749. { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
  1750. { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
  1751. { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn },
  1752. { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio },
  1753. { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop },
  1754. { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1755. { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1756. { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
  1757. { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
  1758. { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
  1759. { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
  1760. { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend },
  1761. { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
  1762. { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
  1763. { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn },
  1764. { CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio },
  1765. { CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop },
  1766. { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1767. { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1768. { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
  1769. { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
  1770. { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
  1771. { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
  1772. { CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend },
  1773. { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
  1774. { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
  1775. { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn },
  1776. { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio },
  1777. { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop },
  1778. { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1779. { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1780. { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
  1781. { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
  1782. { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
  1783. { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
  1784. { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend },
  1785. { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
  1786. { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
  1787. { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn },
  1788. { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio },
  1789. { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop },
  1790. { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1791. { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1792. { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
  1793. { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
  1794. { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
  1795. { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
  1796. { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend },
  1797. { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
  1798. { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
  1799. { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
  1800. { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
  1801. { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
  1802. { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
  1803. { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1804. { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1805. { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
  1806. { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
  1807. { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
  1808. { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
  1809. { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
  1810. { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
  1811. { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
  1812. { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1813. { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1814. { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
  1815. { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
  1816. { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
  1817. { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
  1818. { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
  1819. { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
  1820. { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1821. { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1822. { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
  1823. { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
  1824. { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
  1825. { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
  1826. { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
  1827. { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
  1828. { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1829. { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
  1830. { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1831. { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
  1832. { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
  1833. { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
  1834. { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
  1835. { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
  1836. { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1837. { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1838. { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
  1839. { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
  1840. { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone },
  1841. { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
  1842. { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
  1843. { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
  1844. { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1845. { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1846. { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
  1847. { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
  1848. { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
  1849. { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
  1850. { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
  1851. { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1852. { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
  1853. };
  1854. int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
  1855. /*
  1856. * Actions for interface - statemachine.
  1857. */
  1858. /**
1859. * Start up the channels by sending CTC_EVENT_START to each channel.
  1860. *
  1861. * fi An instance of an interface statemachine.
1862. * event The event that just happened.
1863. * arg Generic pointer, cast from struct net_device * upon call.
  1864. */
  1865. static void dev_action_start(fsm_instance *fi, int event, void *arg)
  1866. {
  1867. struct net_device *dev = arg;
  1868. struct ctcm_priv *priv = dev->ml_priv;
  1869. int direction;
  1870. CTCMY_DBF_DEV_NAME(SETUP, dev, "");
  1871. fsm_deltimer(&priv->restart_timer);
  1872. fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
  1873. if (IS_MPC(priv))
  1874. priv->mpcg->channels_terminating = 0;
  1875. for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
  1876. struct channel *ch = priv->channel[direction];
  1877. fsm_event(ch->fsm, CTC_EVENT_START, ch);
  1878. }
  1879. }
  1880. /**
1881. * Shut down the channels by sending CTC_EVENT_STOP to each channel.
  1882. *
  1883. * fi An instance of an interface statemachine.
1884. * event The event that just happened.
1885. * arg Generic pointer, cast from struct net_device * upon call.
  1886. */
  1887. static void dev_action_stop(fsm_instance *fi, int event, void *arg)
  1888. {
  1889. int direction;
  1890. struct net_device *dev = arg;
  1891. struct ctcm_priv *priv = dev->ml_priv;
  1892. CTCMY_DBF_DEV_NAME(SETUP, dev, "");
  1893. fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
  1894. for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
  1895. struct channel *ch = priv->channel[direction];
  1896. fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
  1897. ch->th_seq_num = 0x00;
  1898. CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
  1899. __func__, ch->th_seq_num);
  1900. }
  1901. if (IS_MPC(priv))
  1902. fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
  1903. }
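/**
* Restart the device by stopping all channels and scheduling
* a delayed DEV_EVENT_START via the restart timer.
*
* fi An instance of an interface statemachine.
* event The event that just happened.
* arg Generic pointer, cast from struct net_device * upon call.
*/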
  1904. static void dev_action_restart(fsm_instance *fi, int event, void *arg)
  1905. {
  1906. int restart_timer;
  1907. struct net_device *dev = arg;
  1908. struct ctcm_priv *priv = dev->ml_priv;
  1909. CTCMY_DBF_DEV_NAME(TRACE, dev, "");
  1910. if (IS_MPC(priv)) {
  1911. restart_timer = CTCM_TIME_1_SEC;
  1912. } else {
  1913. restart_timer = CTCM_TIME_5_SEC;
  1914. }
  1915. dev_info(&dev->dev, "Restarting device\n");
  1916. dev_action_stop(fi, event, arg);
  1917. fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
  1918. if (IS_MPC(priv))
  1919. fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
  1920. /* going back into start sequence too quickly can */
  1921. /* result in the other side becoming unreachable due */
  1922. /* to sense reported when IO is aborted */
  1923. fsm_addtimer(&priv->restart_timer, restart_timer,
  1924. DEV_EVENT_START, dev);
  1925. }
  1926. /**
  1927. * Called from channel statemachine
  1928. * when a channel is up and running.
  1929. *
  1930. * fi An instance of an interface statemachine.
1931. * event The event that just happened.
1932. * arg Generic pointer, cast from struct net_device * upon call.
  1933. */
  1934. static void dev_action_chup(fsm_instance *fi, int event, void *arg)
  1935. {
  1936. struct net_device *dev = arg;
  1937. struct ctcm_priv *priv = dev->ml_priv;
  1938. int dev_stat = fsm_getstate(fi);
  1939. CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
  1940. "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
  1941. dev->name, dev->ml_priv, dev_stat, event);
  1942. switch (fsm_getstate(fi)) {
  1943. case DEV_STATE_STARTWAIT_RXTX:
  1944. if (event == DEV_EVENT_RXUP)
  1945. fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
  1946. else
  1947. fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
  1948. break;
  1949. case DEV_STATE_STARTWAIT_RX:
  1950. if (event == DEV_EVENT_RXUP) {
  1951. fsm_newstate(fi, DEV_STATE_RUNNING);
  1952. dev_info(&dev->dev,
  1953. "Connected with remote side\n");
  1954. ctcm_clear_busy(dev);
  1955. }
  1956. break;
  1957. case DEV_STATE_STARTWAIT_TX:
  1958. if (event == DEV_EVENT_TXUP) {
  1959. fsm_newstate(fi, DEV_STATE_RUNNING);
  1960. dev_info(&dev->dev,
  1961. "Connected with remote side\n");
  1962. ctcm_clear_busy(dev);
  1963. }
  1964. break;
  1965. case DEV_STATE_STOPWAIT_TX:
  1966. if (event == DEV_EVENT_RXUP)
  1967. fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
  1968. break;
  1969. case DEV_STATE_STOPWAIT_RX:
  1970. if (event == DEV_EVENT_TXUP)
  1971. fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
  1972. break;
  1973. }
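/* for MPC the newly available channel must also be added to the MPC group */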
  1974. if (IS_MPC(priv)) {
  1975. if (event == DEV_EVENT_RXUP)
  1976. mpc_channel_action(priv->channel[CTCM_READ],
  1977. CTCM_READ, MPC_CHANNEL_ADD);
  1978. else
  1979. mpc_channel_action(priv->channel[CTCM_WRITE],
  1980. CTCM_WRITE, MPC_CHANNEL_ADD);
  1981. }
  1982. }
  1983. /**
  1984. * Called from device statemachine
1985. * when a channel has been shut down.
  1986. *
  1987. * fi An instance of an interface statemachine.
1988. * event The event that just happened.
1989. * arg Generic pointer, cast from struct net_device * upon call.
  1990. */
  1991. static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
  1992. {
  1993. struct net_device *dev = arg;
  1994. struct ctcm_priv *priv = dev->ml_priv;
  1995. CTCMY_DBF_DEV_NAME(SETUP, dev, "");
  1996. switch (fsm_getstate(fi)) {
  1997. case DEV_STATE_RUNNING:
  1998. if (event == DEV_EVENT_TXDOWN)
  1999. fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
  2000. else
  2001. fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
  2002. break;
  2003. case DEV_STATE_STARTWAIT_RX:
  2004. if (event == DEV_EVENT_TXDOWN)
  2005. fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
  2006. break;
  2007. case DEV_STATE_STARTWAIT_TX:
  2008. if (event == DEV_EVENT_RXDOWN)
  2009. fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
  2010. break;
  2011. case DEV_STATE_STOPWAIT_RXTX:
  2012. if (event == DEV_EVENT_TXDOWN)
  2013. fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
  2014. else
  2015. fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
  2016. break;
  2017. case DEV_STATE_STOPWAIT_RX:
  2018. if (event == DEV_EVENT_RXDOWN)
  2019. fsm_newstate(fi, DEV_STATE_STOPPED);
  2020. break;
  2021. case DEV_STATE_STOPWAIT_TX:
  2022. if (event == DEV_EVENT_TXDOWN)
  2023. fsm_newstate(fi, DEV_STATE_STOPPED);
  2024. break;
  2025. }
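/* for MPC the channel that went down is also removed from the MPC group */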
  2026. if (IS_MPC(priv)) {
  2027. if (event == DEV_EVENT_RXDOWN)
  2028. mpc_channel_action(priv->channel[CTCM_READ],
  2029. CTCM_READ, MPC_CHANNEL_REMOVE);
  2030. else
  2031. mpc_channel_action(priv->channel[CTCM_WRITE],
  2032. CTCM_WRITE, MPC_CHANNEL_REMOVE);
  2033. }
  2034. }
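/* The statemachine for an interface (net_device). */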
  2035. const fsm_node dev_fsm[] = {
  2036. { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
  2037. { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
  2038. { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
  2039. { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
  2040. { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
  2041. { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
  2042. { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
  2043. { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
  2044. { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
  2045. { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
  2046. { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
  2047. { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
  2048. { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
  2049. { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
  2050. { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
  2051. { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
  2052. { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
  2053. { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
  2054. { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
  2055. { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
  2056. { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
  2057. { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
  2058. { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
  2059. { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
  2060. { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
  2061. { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
  2062. { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
  2063. { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
  2064. { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
  2065. { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
  2066. { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
  2067. { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
  2068. { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
  2069. { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
  2070. { DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop },
  2071. { DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop },
  2072. { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
  2073. };
  2074. int dev_fsm_len = ARRAY_SIZE(dev_fsm);
  2075. /* --- This is the END my friend --- */