
/drivers/s390/net/ctcm_fsms.c

https://bitbucket.org/wisechild/galaxy-nexus
C | 2295 lines | 1697 code | 220 blank | 378 comment | 256 complexity | 68d0446a5af5fcb97aeaf2ee7b4fbe1c MD5
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0


   1/*
   2 * drivers/s390/net/ctcm_fsms.c
   3 *
   4 * Copyright IBM Corp. 2001, 2007
   5 * Authors:	Fritz Elfert (felfert@millenux.com)
   6 * 		Peter Tiedemann (ptiedem@de.ibm.com)
   7 *	MPC additions :
   8 *		Belinda Thompson (belindat@us.ibm.com)
   9 *		Andy Richter (richtera@us.ibm.com)
  10 */
  11
  12#undef DEBUG
  13#undef DEBUGDATA
  14#undef DEBUGCCW
  15
  16#define KMSG_COMPONENT "ctcm"
  17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  18
  19#include <linux/module.h>
  20#include <linux/init.h>
  21#include <linux/kernel.h>
  22#include <linux/slab.h>
  23#include <linux/errno.h>
  24#include <linux/types.h>
  25#include <linux/interrupt.h>
  26#include <linux/timer.h>
  27#include <linux/bitops.h>
  28
  29#include <linux/signal.h>
  30#include <linux/string.h>
  31
  32#include <linux/ip.h>
  33#include <linux/if_arp.h>
  34#include <linux/tcp.h>
  35#include <linux/skbuff.h>
  36#include <linux/ctype.h>
  37#include <net/dst.h>
  38
  39#include <linux/io.h>
  40#include <asm/ccwdev.h>
  41#include <asm/ccwgroup.h>
  42#include <linux/uaccess.h>
  43
  44#include <asm/idals.h>
  45
  46#include "fsm.h"
  47
  48#include "ctcm_dbug.h"
  49#include "ctcm_main.h"
  50#include "ctcm_fsms.h"
  51
  52const char *dev_state_names[] = {
  53	[DEV_STATE_STOPPED]		= "Stopped",
  54	[DEV_STATE_STARTWAIT_RXTX]	= "StartWait RXTX",
  55	[DEV_STATE_STARTWAIT_RX]	= "StartWait RX",
  56	[DEV_STATE_STARTWAIT_TX]	= "StartWait TX",
  57	[DEV_STATE_STOPWAIT_RXTX]	= "StopWait RXTX",
  58	[DEV_STATE_STOPWAIT_RX]		= "StopWait RX",
  59	[DEV_STATE_STOPWAIT_TX]		= "StopWait TX",
  60	[DEV_STATE_RUNNING]		= "Running",
  61};
  62
  63const char *dev_event_names[] = {
  64	[DEV_EVENT_START]	= "Start",
  65	[DEV_EVENT_STOP]	= "Stop",
  66	[DEV_EVENT_RXUP]	= "RX up",
  67	[DEV_EVENT_TXUP]	= "TX up",
  68	[DEV_EVENT_RXDOWN]	= "RX down",
  69	[DEV_EVENT_TXDOWN]	= "TX down",
  70	[DEV_EVENT_RESTART]	= "Restart",
  71};
  72
  73const char *ctc_ch_event_names[] = {
  74	[CTC_EVENT_IO_SUCCESS]	= "ccw_device success",
  75	[CTC_EVENT_IO_EBUSY]	= "ccw_device busy",
  76	[CTC_EVENT_IO_ENODEV]	= "ccw_device enodev",
  77	[CTC_EVENT_IO_UNKNOWN]	= "ccw_device unknown",
  78	[CTC_EVENT_ATTNBUSY]	= "Status ATTN & BUSY",
  79	[CTC_EVENT_ATTN]	= "Status ATTN",
  80	[CTC_EVENT_BUSY]	= "Status BUSY",
  81	[CTC_EVENT_UC_RCRESET]	= "Unit check remote reset",
  82	[CTC_EVENT_UC_RSRESET]	= "Unit check remote system reset",
  83	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
  84	[CTC_EVENT_UC_TXPARITY]	= "Unit check TX parity",
  85	[CTC_EVENT_UC_HWFAIL]	= "Unit check Hardware failure",
  86	[CTC_EVENT_UC_RXPARITY]	= "Unit check RX parity",
  87	[CTC_EVENT_UC_ZERO]	= "Unit check ZERO",
  88	[CTC_EVENT_UC_UNKNOWN]	= "Unit check Unknown",
  89	[CTC_EVENT_SC_UNKNOWN]	= "SubChannel check Unknown",
  90	[CTC_EVENT_MC_FAIL]	= "Machine check failure",
  91	[CTC_EVENT_MC_GOOD]	= "Machine check operational",
  92	[CTC_EVENT_IRQ]		= "IRQ normal",
  93	[CTC_EVENT_FINSTAT]	= "IRQ final",
  94	[CTC_EVENT_TIMER]	= "Timer",
  95	[CTC_EVENT_START]	= "Start",
  96	[CTC_EVENT_STOP]	= "Stop",
  97	/*
  98	* additional MPC events
  99	*/
 100	[CTC_EVENT_SEND_XID]	= "XID Exchange",
 101	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
 102};
 103
 104const char *ctc_ch_state_names[] = {
 105	[CTC_STATE_IDLE]	= "Idle",
 106	[CTC_STATE_STOPPED]	= "Stopped",
 107	[CTC_STATE_STARTWAIT]	= "StartWait",
 108	[CTC_STATE_STARTRETRY]	= "StartRetry",
 109	[CTC_STATE_SETUPWAIT]	= "SetupWait",
 110	[CTC_STATE_RXINIT]	= "RX init",
 111	[CTC_STATE_TXINIT]	= "TX init",
 112	[CTC_STATE_RX]		= "RX",
 113	[CTC_STATE_TX]		= "TX",
 114	[CTC_STATE_RXIDLE]	= "RX idle",
 115	[CTC_STATE_TXIDLE]	= "TX idle",
 116	[CTC_STATE_RXERR]	= "RX error",
 117	[CTC_STATE_TXERR]	= "TX error",
 118	[CTC_STATE_TERM]	= "Terminating",
 119	[CTC_STATE_DTERM]	= "Restarting",
 120	[CTC_STATE_NOTOP]	= "Not operational",
 121	/*
 122	* additional MPC states
 123	*/
 124	[CH_XID0_PENDING]	= "Pending XID0 Start",
 125	[CH_XID0_INPROGRESS]	= "In XID0 Negotiations ",
 126	[CH_XID7_PENDING]	= "Pending XID7 P1 Start",
 127	[CH_XID7_PENDING1]	= "Active XID7 P1 Exchange ",
 128	[CH_XID7_PENDING2]	= "Pending XID7 P2 Start ",
 129	[CH_XID7_PENDING3]	= "Active XID7 P2 Exchange ",
 130	[CH_XID7_PENDING4]	= "XID7 Complete - Pending READY ",
 131};
 132
 133static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
 134
 135/*
 136 * ----- static ctcm actions for channel statemachine -----
 137 *
 138*/
 139static void chx_txdone(fsm_instance *fi, int event, void *arg);
 140static void chx_rx(fsm_instance *fi, int event, void *arg);
 141static void chx_rxidle(fsm_instance *fi, int event, void *arg);
 142static void chx_firstio(fsm_instance *fi, int event, void *arg);
 143static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
 144static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
 145static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
 146static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
 147static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
 148static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
 149static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
 150static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
 151static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
 152static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
 153static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
 154static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
 155static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
 156static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
 157
 158/*
 159 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
 160 *
 161*/
 162static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
 163static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
 164static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
 165/* shared :
 166static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
 167static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
 168static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
 169static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
 170static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
 171static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
 172static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
 173static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
 174static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
 175static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
 176static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
 177static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
 178static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
 179static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
 180*/
 181static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
 182static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
 183static void ctcmpc_chx_resend(fsm_instance *, int, void *);
 184static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
 185
 186/**
 187 * Check return code of a preceding ccw_device call, halt_IO etc...
 188 *
  189 * ch	:	The channel the error belongs to.
  190 * rc	:	The error code (!= 0) to inspect; msg describes the failed call.
 191 */
 192void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
 193{
 194	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
 195		"%s(%s): %s: %04x\n",
 196		CTCM_FUNTAIL, ch->id, msg, rc);
 197	switch (rc) {
 198	case -EBUSY:
 199		pr_info("%s: The communication peer is busy\n",
 200			ch->id);
 201		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
 202		break;
 203	case -ENODEV:
 204		pr_err("%s: The specified target device is not valid\n",
 205		       ch->id);
 206		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
 207		break;
 208	default:
 209		pr_err("An I/O operation resulted in error %04x\n",
 210		       rc);
 211		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
 212	}
 213}
 214
 215void ctcm_purge_skb_queue(struct sk_buff_head *q)
 216{
 217	struct sk_buff *skb;
 218
 219	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
 220
 221	while ((skb = skb_dequeue(q))) {
 222		atomic_dec(&skb->users);
 223		dev_kfree_skb_any(skb);
 224	}
 225}
 226
 227/**
 228 * NOP action for statemachines
 229 */
 230static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
 231{
 232}
 233
 234/*
 235 * Actions for channel - statemachines.
 236 */
 237
 238/**
  239 * Normal data has been sent. Free the corresponding
 240 * skb (it's in io_queue), reset dev->tbusy and
 241 * revert to idle state.
 242 *
 243 * fi		An instance of a channel statemachine.
 244 * event	The event, just happened.
 245 * arg		Generic pointer, casted from channel * upon call.
 246 */
 247static void chx_txdone(fsm_instance *fi, int event, void *arg)
 248{
 249	struct channel *ch = arg;
 250	struct net_device *dev = ch->netdev;
 251	struct ctcm_priv *priv = dev->ml_priv;
 252	struct sk_buff *skb;
 253	int first = 1;
 254	int i;
 255	unsigned long duration;
 256	struct timespec done_stamp = current_kernel_time(); /* xtime */
 257
 258	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
 259
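	/* Elapsed time of the just-completed write in microseconds;
	 * ch->prof.tx_time records the worst case observed so far. */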
 260	duration =
 261	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
 262	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
 263	if (duration > ch->prof.tx_time)
 264		ch->prof.tx_time = duration;
 265
 266	if (ch->irb->scsw.cmd.count != 0)
 267		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
 268			"%s(%s): TX not complete, remaining %d bytes",
 269			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
 270	fsm_deltimer(&ch->timer);
 271	while ((skb = skb_dequeue(&ch->io_queue))) {
 272		priv->stats.tx_packets++;
 273		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
 274		if (first) {
 275			priv->stats.tx_bytes += 2;
 276			first = 0;
 277		}
 278		atomic_dec(&skb->users);
 279		dev_kfree_skb_irq(skb);
 280	}
 281	spin_lock(&ch->collect_lock);
 282	clear_normalized_cda(&ch->ccw[4]);
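	/* If packets piled up on the collect queue while this write was in
	 * flight, merge them into trans_skb and start another chained write;
	 * otherwise drop back to TX idle. */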
 283	if (ch->collect_len > 0) {
 284		int rc;
 285
 286		if (ctcm_checkalloc_buffer(ch)) {
 287			spin_unlock(&ch->collect_lock);
 288			return;
 289		}
 290		ch->trans_skb->data = ch->trans_skb_data;
 291		skb_reset_tail_pointer(ch->trans_skb);
 292		ch->trans_skb->len = 0;
 293		if (ch->prof.maxmulti < (ch->collect_len + 2))
 294			ch->prof.maxmulti = ch->collect_len + 2;
 295		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
 296			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
 297		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
 298		i = 0;
 299		while ((skb = skb_dequeue(&ch->collect_queue))) {
 300			skb_copy_from_linear_data(skb,
 301				skb_put(ch->trans_skb, skb->len), skb->len);
 302			priv->stats.tx_packets++;
 303			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
 304			atomic_dec(&skb->users);
 305			dev_kfree_skb_irq(skb);
 306			i++;
 307		}
 308		ch->collect_len = 0;
 309		spin_unlock(&ch->collect_lock);
 310		ch->ccw[1].count = ch->trans_skb->len;
 311		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
 312		ch->prof.send_stamp = current_kernel_time(); /* xtime */
 313		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
 314						(unsigned long)ch, 0xff, 0);
 315		ch->prof.doios_multi++;
 316		if (rc != 0) {
 317			priv->stats.tx_dropped += i;
 318			priv->stats.tx_errors += i;
 319			fsm_deltimer(&ch->timer);
 320			ctcm_ccw_check_rc(ch, rc, "chained TX");
 321		}
 322	} else {
 323		spin_unlock(&ch->collect_lock);
 324		fsm_newstate(fi, CTC_STATE_TXIDLE);
 325	}
 326	ctcm_clear_busy_do(dev);
 327}
 328
 329/**
 330 * Initial data is sent.
 331 * Notify device statemachine that we are up and
 332 * running.
 333 *
 334 * fi		An instance of a channel statemachine.
 335 * event	The event, just happened.
 336 * arg		Generic pointer, casted from channel * upon call.
 337 */
 338void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
 339{
 340	struct channel *ch = arg;
 341	struct net_device *dev = ch->netdev;
 342	struct ctcm_priv *priv = dev->ml_priv;
 343
 344	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
 345
 346	fsm_deltimer(&ch->timer);
 347	fsm_newstate(fi, CTC_STATE_TXIDLE);
 348	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
 349}
 350
 351/**
  352 * Got normal data, check it for sanity, queue it up, allocate a new buffer,
  353 * trigger the bottom half, and initiate the next read.
 354 *
 355 * fi		An instance of a channel statemachine.
 356 * event	The event, just happened.
 357 * arg		Generic pointer, casted from channel * upon call.
 358 */
 359static void chx_rx(fsm_instance *fi, int event, void *arg)
 360{
 361	struct channel *ch = arg;
 362	struct net_device *dev = ch->netdev;
 363	struct ctcm_priv *priv = dev->ml_priv;
 364	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
 365	struct sk_buff *skb = ch->trans_skb;
 366	__u16 block_len = *((__u16 *)skb->data);
 367	int check_len;
 368	int rc;
 369
 370	fsm_deltimer(&ch->timer);
 371	if (len < 8) {
 372		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 373			"%s(%s): got packet with length %d < 8\n",
 374					CTCM_FUNTAIL, dev->name, len);
 375		priv->stats.rx_dropped++;
 376		priv->stats.rx_length_errors++;
 377						goto again;
 378	}
 379	if (len > ch->max_bufsize) {
 380		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 381			"%s(%s): got packet with length %d > %d\n",
 382				CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
 383		priv->stats.rx_dropped++;
 384		priv->stats.rx_length_errors++;
 385						goto again;
 386	}
 387
 388	/*
 389	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
 390	 */
 391	switch (ch->protocol) {
 392	case CTCM_PROTO_S390:
 393	case CTCM_PROTO_OS390:
 394		check_len = block_len + 2;
 395		break;
 396	default:
 397		check_len = block_len;
 398		break;
 399	}
 400	if ((len < block_len) || (len > check_len)) {
 401		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 402			"%s(%s): got block length %d != rx length %d\n",
 403				CTCM_FUNTAIL, dev->name, block_len, len);
 404		if (do_debug)
 405			ctcmpc_dump_skb(skb, 0);
 406
 407		*((__u16 *)skb->data) = len;
 408		priv->stats.rx_dropped++;
 409		priv->stats.rx_length_errors++;
 410						goto again;
 411	}
 412	if (block_len > 2) {
 413		*((__u16 *)skb->data) = block_len - 2;
 414		ctcm_unpack_skb(ch, skb);
 415	}
 416 again:
 417	skb->data = ch->trans_skb_data;
 418	skb_reset_tail_pointer(skb);
 419	skb->len = 0;
 420	if (ctcm_checkalloc_buffer(ch))
 421		return;
 422	ch->ccw[1].count = ch->max_bufsize;
 423	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
 424					(unsigned long)ch, 0xff, 0);
 425	if (rc != 0)
 426		ctcm_ccw_check_rc(ch, rc, "normal RX");
 427}
 428
 429/**
  430 * Initialize the connection by sending the initial block length (CTCM_INITIAL_BLOCKLEN) as a __u16.
 431 *
 432 * fi		An instance of a channel statemachine.
 433 * event	The event, just happened.
 434 * arg		Generic pointer, casted from channel * upon call.
 435 */
 436static void chx_firstio(fsm_instance *fi, int event, void *arg)
 437{
 438	int rc;
 439	struct channel *ch = arg;
 440	int fsmstate = fsm_getstate(fi);
 441
 442	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 443		"%s(%s) : %02x",
 444		CTCM_FUNTAIL, ch->id, fsmstate);
 445
 446	ch->sense_rc = 0;	/* reset unit check report control */
 447	if (fsmstate == CTC_STATE_TXIDLE)
 448		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
 449			"%s(%s): remote side issued READ?, init.\n",
 450				CTCM_FUNTAIL, ch->id);
 451	fsm_deltimer(&ch->timer);
 452	if (ctcm_checkalloc_buffer(ch))
 453		return;
 454	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
 455	    (ch->protocol == CTCM_PROTO_OS390)) {
 456		/* OS/390 resp. z/OS */
 457		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
 458			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
 459			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
 460				     CTC_EVENT_TIMER, ch);
 461			chx_rxidle(fi, event, arg);
 462		} else {
 463			struct net_device *dev = ch->netdev;
 464			struct ctcm_priv *priv = dev->ml_priv;
 465			fsm_newstate(fi, CTC_STATE_TXIDLE);
 466			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
 467		}
 468		return;
 469	}
 470	/*
 471	 * Don't setup a timer for receiving the initial RX frame
 472	 * if in compatibility mode, since VM TCP delays the initial
 473	 * frame until it has some data to send.
 474	 */
 475	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
 476	    (ch->protocol != CTCM_PROTO_S390))
 477		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
 478
 479	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
 480	ch->ccw[1].count = 2;	/* Transfer only length */
 481
 482	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
 483		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
 484	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
 485					(unsigned long)ch, 0xff, 0);
 486	if (rc != 0) {
 487		fsm_deltimer(&ch->timer);
 488		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
 489		ctcm_ccw_check_rc(ch, rc, "init IO");
 490	}
 491	/*
 492	 * If in compatibility mode since we don't setup a timer, we
 493	 * also signal RX channel up immediately. This enables us
 494	 * to send packets early which in turn usually triggers some
  495 * reply from VM TCP which brings up the RX channel to its
 496	 * final state.
 497	 */
 498	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
 499	    (ch->protocol == CTCM_PROTO_S390)) {
 500		struct net_device *dev = ch->netdev;
 501		struct ctcm_priv *priv = dev->ml_priv;
 502		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
 503	}
 504}
 505
 506/**
 507 * Got initial data, check it. If OK,
 508 * notify device statemachine that we are up and
 509 * running.
 510 *
 511 * fi		An instance of a channel statemachine.
 512 * event	The event, just happened.
 513 * arg		Generic pointer, casted from channel * upon call.
 514 */
 515static void chx_rxidle(fsm_instance *fi, int event, void *arg)
 516{
 517	struct channel *ch = arg;
 518	struct net_device *dev = ch->netdev;
 519	struct ctcm_priv *priv = dev->ml_priv;
 520	__u16 buflen;
 521	int rc;
 522
 523	fsm_deltimer(&ch->timer);
 524	buflen = *((__u16 *)ch->trans_skb->data);
 525	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
 526			__func__, dev->name, buflen);
 527
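	/* A reply of at least CTCM_INITIAL_BLOCKLEN means the peer completed
	 * the handshake: go to RX idle and start the first real read.
	 * A shorter reply restarts the initial exchange via chx_firstio(). */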
 528	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
 529		if (ctcm_checkalloc_buffer(ch))
 530			return;
 531		ch->ccw[1].count = ch->max_bufsize;
 532		fsm_newstate(fi, CTC_STATE_RXIDLE);
 533		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
 534						(unsigned long)ch, 0xff, 0);
 535		if (rc != 0) {
 536			fsm_newstate(fi, CTC_STATE_RXINIT);
 537			ctcm_ccw_check_rc(ch, rc, "initial RX");
 538		} else
 539			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
 540	} else {
 541		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
 542				__func__, dev->name,
 543					buflen, CTCM_INITIAL_BLOCKLEN);
 544		chx_firstio(fi, event, arg);
 545	}
 546}
 547
 548/**
 549 * Set channel into extended mode.
 550 *
 551 * fi		An instance of a channel statemachine.
 552 * event	The event, just happened.
 553 * arg		Generic pointer, casted from channel * upon call.
 554 */
 555static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
 556{
 557	struct channel *ch = arg;
 558	int rc;
 559	unsigned long saveflags = 0;
 560	int timeout = CTCM_TIME_5_SEC;
 561
 562	fsm_deltimer(&ch->timer);
 563	if (IS_MPC(ch)) {
 564		timeout = 1500;
 565		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
 566				__func__, smp_processor_id(), ch, ch->id);
 567	}
 568	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
 569	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
 570	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
 571
 572	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
 573		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  574			/* Conditional locking like this cannot be resolved by
  575			 * static analysis. => ignore sparse warnings here. */
 576
 577	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
 578					(unsigned long)ch, 0xff, 0);
 579	if (event == CTC_EVENT_TIMER)	/* see above comments */
 580		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
 581	if (rc != 0) {
 582		fsm_deltimer(&ch->timer);
 583		fsm_newstate(fi, CTC_STATE_STARTWAIT);
 584		ctcm_ccw_check_rc(ch, rc, "set Mode");
 585	} else
 586		ch->retry = 0;
 587}
 588
 589/**
 590 * Setup channel.
 591 *
 592 * fi		An instance of a channel statemachine.
 593 * event	The event, just happened.
 594 * arg		Generic pointer, casted from channel * upon call.
 595 */
 596static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
 597{
 598	struct channel *ch	= arg;
 599	unsigned long saveflags;
 600	int rc;
 601
 602	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
 603		CTCM_FUNTAIL, ch->id,
 604		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
 605
 606	if (ch->trans_skb != NULL) {
 607		clear_normalized_cda(&ch->ccw[1]);
 608		dev_kfree_skb(ch->trans_skb);
 609		ch->trans_skb = NULL;
 610	}
 611	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
 612		ch->ccw[1].cmd_code = CCW_CMD_READ;
 613		ch->ccw[1].flags = CCW_FLAG_SLI;
 614		ch->ccw[1].count = 0;
 615	} else {
 616		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
 617		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
 618		ch->ccw[1].count = 0;
 619	}
 620	if (ctcm_checkalloc_buffer(ch)) {
 621		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
 622			"%s(%s): %s trans_skb alloc delayed "
 623			"until first transfer",
 624			CTCM_FUNTAIL, ch->id,
 625			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
 626				"RX" : "TX");
 627	}
 628	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
 629	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
 630	ch->ccw[0].count = 0;
 631	ch->ccw[0].cda = 0;
 632	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
 633	ch->ccw[2].flags = CCW_FLAG_SLI;
 634	ch->ccw[2].count = 0;
 635	ch->ccw[2].cda = 0;
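	/* ccw[0..2] form the basic channel program (PREPARE, READ/WRITE, NOOP).
	 * The same triplet is mirrored into ccw[3..5]; that copy is what e.g.
	 * ctcm_chx_txretry() restarts, with its data CCW in ccw[4]. */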
 636	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
 637	ch->ccw[4].cda = 0;
 638	ch->ccw[4].flags &= ~CCW_FLAG_IDA;
 639
 640	fsm_newstate(fi, CTC_STATE_STARTWAIT);
 641	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
 642	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
 643	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
 644	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
 645	if (rc != 0) {
 646		if (rc != -EBUSY)
 647			fsm_deltimer(&ch->timer);
 648		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
 649	}
 650}
 651
 652/**
 653 * Shutdown a channel.
 654 *
 655 * fi		An instance of a channel statemachine.
 656 * event	The event, just happened.
 657 * arg		Generic pointer, casted from channel * upon call.
 658 */
 659static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
 660{
 661	struct channel *ch = arg;
 662	unsigned long saveflags = 0;
 663	int rc;
 664	int oldstate;
 665
 666	fsm_deltimer(&ch->timer);
 667	if (IS_MPC(ch))
 668		fsm_deltimer(&ch->sweep_timer);
 669
 670	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
 671
 672	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
 673		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  674			/* Conditional locking like this cannot be resolved by
  675			 * static analysis. => ignore sparse warnings here. */
 676	oldstate = fsm_getstate(fi);
 677	fsm_newstate(fi, CTC_STATE_TERM);
 678	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
 679
 680	if (event == CTC_EVENT_STOP)
 681		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
 682			/* see remark above about conditional locking */
 683
 684	if (rc != 0 && rc != -EBUSY) {
 685		fsm_deltimer(&ch->timer);
 686		if (event != CTC_EVENT_STOP) {
 687			fsm_newstate(fi, oldstate);
 688			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
 689		}
 690	}
 691}
 692
 693/**
  694 * Cleanup helper for chx_fail and chx_stopped:
  695 * clean up the channel's queues and notify the interface statemachine.
 696 *
 697 * fi		An instance of a channel statemachine.
 698 * state	The next state (depending on caller).
 699 * ch		The channel to operate on.
 700 */
 701static void ctcm_chx_cleanup(fsm_instance *fi, int state,
 702		struct channel *ch)
 703{
 704	struct net_device *dev = ch->netdev;
 705	struct ctcm_priv *priv = dev->ml_priv;
 706
 707	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
 708			"%s(%s): %s[%d]\n",
 709			CTCM_FUNTAIL, dev->name, ch->id, state);
 710
 711	fsm_deltimer(&ch->timer);
 712	if (IS_MPC(ch))
 713		fsm_deltimer(&ch->sweep_timer);
 714
 715	fsm_newstate(fi, state);
 716	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
 717		clear_normalized_cda(&ch->ccw[1]);
 718		dev_kfree_skb_any(ch->trans_skb);
 719		ch->trans_skb = NULL;
 720	}
 721
 722	ch->th_seg = 0x00;
 723	ch->th_seq_num = 0x00;
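	/* Read channels just flush their io_queue and report RXDOWN; write
	 * channels also drop any collected (and, for MPC, sweep) data before
	 * reporting TXDOWN. */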
 724	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
 725		skb_queue_purge(&ch->io_queue);
 726		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
 727	} else {
 728		ctcm_purge_skb_queue(&ch->io_queue);
 729		if (IS_MPC(ch))
 730			ctcm_purge_skb_queue(&ch->sweep_queue);
 731		spin_lock(&ch->collect_lock);
 732		ctcm_purge_skb_queue(&ch->collect_queue);
 733		ch->collect_len = 0;
 734		spin_unlock(&ch->collect_lock);
 735		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
 736	}
 737}
 738
 739/**
 740 * A channel has successfully been halted.
  741 * Clean up its queue and notify the interface statemachine.
 742 *
 743 * fi		An instance of a channel statemachine.
 744 * event	The event, just happened.
 745 * arg		Generic pointer, casted from channel * upon call.
 746 */
 747static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
 748{
 749	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
 750}
 751
 752/**
  753 * A stop command from the device statemachine arrived while we are in
  754 * not-operational mode. Set the state to stopped.
 755 *
 756 * fi		An instance of a channel statemachine.
 757 * event	The event, just happened.
 758 * arg		Generic pointer, casted from channel * upon call.
 759 */
 760static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
 761{
 762	fsm_newstate(fi, CTC_STATE_STOPPED);
 763}
 764
 765/**
  766 * A machine check for no path, not-operational status, or a gone device
  767 * has happened.
  768 * Clean up the queue and notify the interface statemachine.
 769 *
 770 * fi		An instance of a channel statemachine.
 771 * event	The event, just happened.
 772 * arg		Generic pointer, casted from channel * upon call.
 773 */
 774static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
 775{
 776	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
 777}
 778
 779/**
 780 * Handle error during setup of channel.
 781 *
 782 * fi		An instance of a channel statemachine.
 783 * event	The event, just happened.
 784 * arg		Generic pointer, casted from channel * upon call.
 785 */
 786static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
 787{
 788	struct channel *ch = arg;
 789	struct net_device *dev = ch->netdev;
 790	struct ctcm_priv *priv = dev->ml_priv;
 791
 792	/*
 793	 * Special case: Got UC_RCRESET on setmode.
 794	 * This means that remote side isn't setup. In this case
 795	 * simply retry after some 10 secs...
 796	 */
 797	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
 798	    ((event == CTC_EVENT_UC_RCRESET) ||
 799	     (event == CTC_EVENT_UC_RSRESET))) {
 800		fsm_newstate(fi, CTC_STATE_STARTRETRY);
 801		fsm_deltimer(&ch->timer);
 802		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
 803		if (!IS_MPC(ch) &&
 804		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
 805			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
 806			if (rc != 0)
 807				ctcm_ccw_check_rc(ch, rc,
 808					"HaltIO in chx_setuperr");
 809		}
 810		return;
 811	}
 812
 813	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
 814		"%s(%s) : %s error during %s channel setup state=%s\n",
 815		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
 816		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
 817		fsm_getstate_str(fi));
 818
 819	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
 820		fsm_newstate(fi, CTC_STATE_RXERR);
 821		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
 822	} else {
 823		fsm_newstate(fi, CTC_STATE_TXERR);
 824		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
 825	}
 826}
 827
 828/**
 829 * Restart a channel after an error.
 830 *
 831 * fi		An instance of a channel statemachine.
 832 * event	The event, just happened.
 833 * arg		Generic pointer, casted from channel * upon call.
 834 */
 835static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
 836{
 837	struct channel *ch = arg;
 838	struct net_device *dev = ch->netdev;
 839	unsigned long saveflags = 0;
 840	int oldstate;
 841	int rc;
 842
 843	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 844		"%s: %s[%d] of %s\n",
 845			CTCM_FUNTAIL, ch->id, event, dev->name);
 846
 847	fsm_deltimer(&ch->timer);
 848
 849	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
 850	oldstate = fsm_getstate(fi);
 851	fsm_newstate(fi, CTC_STATE_STARTWAIT);
 852	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
 853		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  854			/* Such conditional locking is a known problem for
  855			 * sparse because it cannot be resolved statically.
  856			 * Warnings should be ignored here. */
 857	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
 858	if (event == CTC_EVENT_TIMER)
 859		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
 860	if (rc != 0) {
 861		if (rc != -EBUSY) {
 862		    fsm_deltimer(&ch->timer);
 863		    fsm_newstate(fi, oldstate);
 864		}
 865		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
 866	}
 867}
 868
 869/**
 870 * Handle error during RX initial handshake (exchange of
 871 * 0-length block header)
 872 *
 873 * fi		An instance of a channel statemachine.
 874 * event	The event, just happened.
 875 * arg		Generic pointer, casted from channel * upon call.
 876 */
 877static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
 878{
 879	struct channel *ch = arg;
 880	struct net_device *dev = ch->netdev;
 881	struct ctcm_priv *priv = dev->ml_priv;
 882
 883	if (event == CTC_EVENT_TIMER) {
 884		if (!IS_MPCDEV(dev))
 885			/* TODO : check if MPC deletes timer somewhere */
 886			fsm_deltimer(&ch->timer);
 887		if (ch->retry++ < 3)
 888			ctcm_chx_restart(fi, event, arg);
 889		else {
 890			fsm_newstate(fi, CTC_STATE_RXERR);
 891			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
 892		}
 893	} else {
 894		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
 895			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
 896			ctc_ch_event_names[event], fsm_getstate_str(fi));
 897
 898		dev_warn(&dev->dev,
 899			"Initialization failed with RX/TX init handshake "
 900			"error %s\n", ctc_ch_event_names[event]);
 901	}
 902}
 903
 904/**
 905 * Notify device statemachine if we gave up initialization
 906 * of RX channel.
 907 *
 908 * fi		An instance of a channel statemachine.
 909 * event	The event, just happened.
 910 * arg		Generic pointer, casted from channel * upon call.
 911 */
 912static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
 913{
 914	struct channel *ch = arg;
 915	struct net_device *dev = ch->netdev;
 916	struct ctcm_priv *priv = dev->ml_priv;
 917
 918	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
 919			"%s(%s): RX %s busy, init. fail",
 920				CTCM_FUNTAIL, dev->name, ch->id);
 921	fsm_newstate(fi, CTC_STATE_RXERR);
 922	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
 923}
 924
 925/**
 926 * Handle RX Unit check remote reset (remote disconnected)
 927 *
 928 * fi		An instance of a channel statemachine.
 929 * event	The event, just happened.
 930 * arg		Generic pointer, casted from channel * upon call.
 931 */
 932static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
 933{
 934	struct channel *ch = arg;
 935	struct channel *ch2;
 936	struct net_device *dev = ch->netdev;
 937	struct ctcm_priv *priv = dev->ml_priv;
 938
 939	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 940			"%s: %s: remote disconnect - re-init ...",
 941				CTCM_FUNTAIL, dev->name);
 942	fsm_deltimer(&ch->timer);
 943	/*
 944	 * Notify device statemachine
 945	 */
 946	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
 947	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
 948
 949	fsm_newstate(fi, CTC_STATE_DTERM);
 950	ch2 = priv->channel[CTCM_WRITE];
 951	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
 952
 953	ccw_device_halt(ch->cdev, (unsigned long)ch);
 954	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
 955}
 956
 957/**
 958 * Handle error during TX channel initialization.
 959 *
 960 * fi		An instance of a channel statemachine.
 961 * event	The event, just happened.
 962 * arg		Generic pointer, casted from channel * upon call.
 963 */
 964static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
 965{
 966	struct channel *ch = arg;
 967	struct net_device *dev = ch->netdev;
 968	struct ctcm_priv *priv = dev->ml_priv;
 969
 970	if (event == CTC_EVENT_TIMER) {
 971		fsm_deltimer(&ch->timer);
 972		if (ch->retry++ < 3)
 973			ctcm_chx_restart(fi, event, arg);
 974		else {
 975			fsm_newstate(fi, CTC_STATE_TXERR);
 976			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
 977		}
 978	} else {
 979		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
 980			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
 981			ctc_ch_event_names[event], fsm_getstate_str(fi));
 982
 983		dev_warn(&dev->dev,
 984			"Initialization failed with RX/TX init handshake "
 985			"error %s\n", ctc_ch_event_names[event]);
 986	}
 987}
 988
 989/**
 990 * Handle TX timeout by retrying operation.
 991 *
 992 * fi		An instance of a channel statemachine.
 993 * event	The event, just happened.
 994 * arg		Generic pointer, casted from channel * upon call.
 995 */
 996static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
 997{
 998	struct channel *ch = arg;
 999	struct net_device *dev = ch->netdev;
1000	struct ctcm_priv *priv = dev->ml_priv;
1001	struct sk_buff *skb;
1002
1003	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
1004			__func__, smp_processor_id(), ch, ch->id);
1005
1006	fsm_deltimer(&ch->timer);
1007	if (ch->retry++ > 3) {
1008		struct mpc_group *gptr = priv->mpcg;
1009		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1010				"%s: %s: retries exceeded",
1011					CTCM_FUNTAIL, ch->id);
1012		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1013		/* Call restart if not MPC, or if MPC and the mpcg fsm is ready;
1014			a non-NULL gptr indicates MPC. */
1015		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
1016			ctcm_chx_restart(fi, event, arg);
1017				goto done;
1018	}
1019
1020	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1021			"%s : %s: retry %d",
1022				CTCM_FUNTAIL, ch->id, ch->retry);
1023	skb = skb_peek(&ch->io_queue);
1024	if (skb) {
1025		int rc = 0;
1026		unsigned long saveflags = 0;
1027		clear_normalized_cda(&ch->ccw[4]);
1028		ch->ccw[4].count = skb->len;
1029		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1030			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1031				"%s: %s: IDAL alloc failed",
1032						CTCM_FUNTAIL, ch->id);
1033			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1034			ctcm_chx_restart(fi, event, arg);
1035				goto done;
1036		}
1037		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
1038		if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
1039			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1040			/* Such conditional locking is a known problem for
1041			 * sparse because it cannot be resolved statically.
1042			 * Warnings should be ignored here. */
1043		if (do_debug_ccw)
1044			ctcmpc_dumpit((char *)&ch->ccw[3],
1045					sizeof(struct ccw1) * 3);
1046
1047		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1048						(unsigned long)ch, 0xff, 0);
1049		if (event == CTC_EVENT_TIMER)
1050			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1051					saveflags);
1052		if (rc != 0) {
1053			fsm_deltimer(&ch->timer);
1054			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
1055			ctcm_purge_skb_queue(&ch->io_queue);
1056		}
1057	}
1058done:
1059	return;
1060}
1061
1062/**
1063 * Handle fatal errors during an I/O command.
1064 *
1065 * fi		An instance of a channel statemachine.
1066 * event	The event, just happened.
1067 * arg		Generic pointer, casted from channel * upon call.
1068 */
1069static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
1070{
1071	struct channel *ch = arg;
1072	struct net_device *dev = ch->netdev;
1073	struct ctcm_priv *priv = dev->ml_priv;
1074	int rd = CHANNEL_DIRECTION(ch->flags);
1075
1076	fsm_deltimer(&ch->timer);
1077	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
1078		"%s: %s: %s unrecoverable channel error",
1079			CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
1080
1081	if (IS_MPC(ch)) {
1082		priv->stats.tx_dropped++;
1083		priv->stats.tx_errors++;
1084	}
1085	if (rd == CTCM_READ) {
1086		fsm_newstate(fi, CTC_STATE_RXERR);
1087		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
1088	} else {
1089		fsm_newstate(fi, CTC_STATE_TXERR);
1090		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1091	}
1092}
1093
1094/*
1095 * The ctcm statemachine for a channel.
1096 */
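/* Each fsm_node maps a (state, event) pair to the action routine that
 * fsm_event() invokes; combinations not listed here have no action and are
 * left to the generic fsm code's default handling. */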
1097const fsm_node ch_fsm[] = {
1098	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
1099	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
1100	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1101	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1102
1103	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
1104	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
1105	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1106	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1107	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
1108
1109	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1110	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1111	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1112	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
1113	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1114	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1115
1116	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1117	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1118	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1119	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1120
1121	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1122	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1123	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	chx_firstio  },
1124	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1125	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1126	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1127	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1128	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1129
1130	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1131	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1132	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	chx_rxidle  },
1133	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
1134	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
1135	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
1136	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
1137	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1138	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	chx_firstio  },
1139	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1140
1141	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1142	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1143	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	chx_rx  },
1144	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
1145	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1146	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1147	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	chx_rx  },
1148
1149	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1150	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1151	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
1152	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
1153	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
1154	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
1155	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1156	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1157
1158	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1159	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1160	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	chx_firstio  },
1161	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1162	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1163	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1164	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1165
1166	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
1167	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
1168	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
1169	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1170	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1171	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1172
1173	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1174	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
1175	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1176	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1177	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1178	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1179
1180	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
1181	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
1182	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	chx_txdone  },
1183	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_txretry  },
1184	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_txretry  },
1185	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
1186	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1187	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1188
1189	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1190	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1191	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1192	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1193};
1194
1195int ch_fsm_len = ARRAY_SIZE(ch_fsm);
1196
1197/*
1198 * MPC actions for mpc channel statemachine
1199 * handling of MPC protocol requires extra
1200 * statemachine and actions which are prefixed ctcmpc_ .
1201 * The ctc_ch_states and ctc_ch_state_names,
1202 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions
1203 * which are expanded by some elements.
1204 */
1205
1206/*
1207 * Actions for mpc channel statemachine.
1208 */
1209
1210/**
1211 * Normal data has been sent. Free the corresponding
1212 * skb (it's in io_queue), reset dev->tbusy and
1213 * revert to idle state.
1214 *
1215 * fi		An instance of a channel statemachine.
1216 * event	The event, just happened.
1217 * arg		Generic pointer, casted from channel * upon call.
1218 */
1219static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1220{
1221	struct channel		*ch = arg;
1222	struct net_device	*dev = ch->netdev;
1223	struct ctcm_priv	*priv = dev->ml_priv;
1224	struct mpc_group	*grp = priv->mpcg;
1225	struct sk_buff		*skb;
1226	int		first = 1;
1227	int		i;
1228	__u32		data_space;
1229	unsigned long	duration;
1230	struct sk_buff	*peekskb;
1231	int		rc;
1232	struct th_header *header;
1233	struct pdu	*p_header;
1234	struct timespec done_stamp = current_kernel_time(); /* xtime */
1235
1236	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
1237			__func__, dev->name, smp_processor_id());
1238
1239	duration =
1240		(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
1241		(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
1242	if (duration > ch->prof.tx_time)
1243		ch->prof.tx_time = duration;
1244
1245	if (ch->irb->scsw.cmd.count != 0)
1246		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
1247			"%s(%s): TX not complete, remaining %d bytes",
1248			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
1249	fsm_deltimer(&ch->timer);
1250	while ((skb = skb_dequeue(&ch->io_queue))) {
1251		priv->stats.tx_packets++;
1252		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
1253		if (first) {
1254			priv->stats.tx_bytes += 2;
1255			first = 0;
1256		}
1257		atomic_dec(&skb->users);
1258		dev_kfree_skb_irq(skb);
1259	}
1260	spin_lock(&ch->collect_lock);
1261	clear_normalized_cda(&ch->ccw[4]);
1262	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
1263		spin_unlock(&ch->collect_lock);
1264		fsm_newstate(fi, CTC_STATE_TXIDLE);
1265				goto done;
1266	}
1267
1268	if (ctcm_checkalloc_buffer(ch)) {
1269		spin_unlock(&ch->collect_lock);
1270				goto done;
1271	}
1272	ch->trans_skb->data = ch->trans_skb_data;
1273	skb_reset_tail_pointer(ch->trans_skb);
1274	ch->trans_skb->len = 0;
1275	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
1276		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
1277	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
1278		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
1279	i = 0;
1280	p_header = NULL;
1281	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
1282
1283	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
1284		       " data_space:%04x\n",
1285		       __func__, data_space);
1286
1287	while ((skb = skb_dequeue(&ch->collect_queue))) {
1288		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
1289		p_header = (struct pdu *)
1290			(skb_tail_pointer(ch->trans_skb) - skb->len);
1291		p_header->pdu_flag = 0x00;
1292		if (skb->protocol == ntohs(ETH_P_SNAP))
1293			p_header->pdu_flag |= 0x60;
1294		else
1295			p_header->pdu_flag |= 0x20;
1296
1297		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1298				__func__, ch->trans_skb->len);
1299		CTCM_PR_DBGDATA("%s: pdu header and data for up"
1300				" to 32 bytes sent to vtam\n", __func__);
1301		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
1302
1303		ch->collect_len -= skb->len;
1304		data_space -= skb->len;
1305		priv->stats.tx_packets++;
1306		priv->stats.tx_bytes += skb->len;
1307		atomic_dec(&skb->users);
1308		dev_kfree_skb_any(skb);
1309		peekskb = skb_peek(&ch->collect_queue);
1310		if (peekskb->len > data_space)
1311			break;
1312		i++;
1313	}
1314	/* p_header points to the last one we handled */
1315	if (p_header)
1316		p_header->pdu_flag |= PDU_LAST;	/*Say it's the last one*/
1317	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
1318	if (!header) {
1319		spin_unlock(&ch->collect_lock);
1320		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1321				goto done;
1322	}
1323	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
1324	ch->th_seq_num++;
1325	header->th_seq_num = ch->th_seq_num;
1326
1327	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
1328					__func__, ch->th_seq_num);
1329
1330	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
1331		TH_HEADER_LENGTH);	/* put the TH on the packet */
1332
1333	kfree(header);
1334
1335	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1336		       __func__, ch->trans_skb->len);
1337	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
1338			"data to vtam from collect_q\n", __func__);
1339	CTCM_D3_DUMP((char *)ch->trans_skb->data,
1340				min_t(int, ch->trans_skb->len, 50));
1341
1342	spin_unlock(&ch->collect_lock);
1343	clear_normalized_cda(&ch->ccw[1]);
1344	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1345		dev_kfree_skb_any(ch->trans_skb);
1346		ch->trans_skb = NULL;
1347		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
1348			"%s: %s: IDAL alloc failed",
1349				CTCM_FUNTAIL, ch->id);
1350		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1351		return;
1352	}
1353	ch->ccw[1].count = ch->trans_skb->len;
1354	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1355	ch->prof.send_stamp = current_kernel_time(); /* xtime */
1356	if (do_debug_ccw)
1357		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1358	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1359					(unsigned long)ch, 0xff, 0);
1360	ch->prof.doios_multi++;
1361	if (rc != 0) {
1362		priv->stats.tx_dropped += i;
1363		priv->stats.tx_errors += i;
1364		fsm_deltimer(&ch->timer);
1365		ctcm_ccw_check_rc(ch, rc, "chained TX");
1366	}
1367done:
1368	ctcm_clear_busy(dev);
1369	return;
1370}
1371
1372/**
1373 * Got normal data, check it for sanity, queue it up, allocate a new buffer,
1374 * trigger the bottom half, and initiate the next read.
1375 *
1376 * fi		An instance of a channel statemachine.
1377 * event	The event, just happened.
1378 * arg		Generic pointer, casted from channel * upon call.
1379 */
1380static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1381{
1382	struct channel		*ch = arg;
1383	struct net_device	*dev = ch->netdev;
1384	struct ctcm_priv	*priv = dev->ml_priv;
1385	struct mpc_group	*grp = priv->mpcg;
1386	struct sk_buff		*skb = ch->trans_skb;
1387	struct sk_buff		*new_skb;
1388	unsigned long		saveflags = 0;	/* avoids compiler warning */
1389	int len	= ch->max_bufsize - ch->irb->scsw.cmd.count;
1390
1391	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
1392			CTCM_FUNTAIL, dev->name, smp_processor_id(),
1393				ch->id, ch->max_bufsize, len);
1394	fsm_deltimer(&ch->timer);
1395
1396	if (skb == NULL) {
1397		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1398			"%s(%s): TRANS_SKB = NULL",
1399				CTCM_FUNTAIL, dev->name);
1400			goto again;
1401	}
1402
1403	if (len < TH_HEADER_LENGTH) {
1404		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1405				"%s(%s): packet length %d too short",
1406					CTCM_FUNTAIL, dev->name, len);
1407		priv->stats.rx_dropped++;
1408		priv->stats.rx_length_errors++;
1409	} else {
1410		/* must have valid th header or game over */
1411		__u32	block_len = len;
1412		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
1413		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
1414
1415		if (new_skb == NULL) {
1416			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1417				"%s(%s): skb allocation failed",
1418						CTCM_FUNTAIL, dev->name);
1419			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1420					goto again;
1421		}
1422		switch (fsm_getstate(grp->fsm)) {
1423		case MPCG_STATE_RESET:
1424		case MPCG_STATE_INOP:
1425			dev_kfree_skb_any(new_skb);
1426			break;
1427		case MPCG_STATE_FLOWC:
1428		case MPCG_STATE_READY:
1429			memcpy(skb_put(new_skb, block_len),
1430					       skb->data, block_len);
1431			skb_queue_tail(&ch->io_queue, new_skb);
1432			tasklet_schedule(&ch->ch_tasklet);
1433			break;
1434		default:
1435			memcpy(skb_put(new_skb, len), skb->data, len);
1436			skb_queue_tail(&ch->io_queue, new_skb);
1437			tasklet_hi_schedule(&ch->ch_tasklet);
1438			break;
1439		}
1440	}
1441
1442again:
1443	switch (fsm_getstate(grp->fsm)) {
1444	int rc, dolock;
1445	case MPCG_STATE_FLOWC:
1446	case MPCG_STATE_READY:
1447		if (ctcm_checkalloc_buffer(ch))
1448			break;
1449		ch->trans_skb->data = ch->trans_skb_data;
1450		skb_reset_tail_pointer(ch->trans_skb);
1451		ch->trans_skb->len = 0;
1452		ch->ccw[1].count = ch->max_bufsize;
1453		if (do_debug_ccw)
1454			ctcmpc_dumpit((char *)&ch->ccw[0],
1455					sizeof(struct ccw1) * 3);
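		/* When running in hard-IRQ context the ccw-device lock is
		 * presumably already held by the caller, so only take it here
		 * when called from elsewhere. */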
1456		dolock = !in_irq();
1457		if (dolock)
1458			spin_lock_irqsave(
1459				get_ccwdev_lock(ch->cdev), saveflags);
1460		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1461						(unsigned long)ch, 0xff, 0);
1462		if (dolock) /* see remark about conditional locking */
1463			spin_unlock_irqrestore(
1464				get_ccwdev_lock(ch->cdev), saveflags);
1465		if (rc != 0)
1466			ctcm_ccw_check_rc(ch, rc, "normal RX");
1467	default:
1468		break;
1469	}
1470
1471	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
1472			__func__, dev->name, ch, ch->id);
1473
1474}
1475
1476/**
1477 * Initialize connection by sending a __u16 of value 0.
1478 *
1479 * fi		An instance of a channel statemachine.
1480 * event	The event, just happened.
1481 * arg		Generic pointer, casted from channel * upon call.
1482 */
1483static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1484{
1485	struct channel		*ch = arg;
1486	struct net_device	*dev = ch->netdev;
1487	struct ctcm_priv	*priv = dev->ml_priv;
1488	struct mpc_group	*gptr = priv->mpcg;
1489
1490	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
1491				__func__, ch->id, ch);
1492
1493	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
1494			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
1495			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
1496			fsm_getstate(gptr->fsm), ch->protocol);
1497
1498	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
1499		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
1500
1501	fsm_deltimer(&ch->timer);
1502	if (ctcm_checkalloc_buffer(ch))
1503				goto done;
1504
1505	switch (fsm_getstate(fi)) {
1506	case CTC_STATE_STARTRETRY:
1507	case CTC_STATE_SETUPWAIT:
1508		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
1509			ctcmpc_chx_rxidle(fi, event, arg);
1510		} else {
1511			fsm_newstate(fi, CTC_STATE_TXIDLE);
1512			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
1513		}
1514				goto done;
1515	default:
1516		break;
1517	};
1518
1519	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1520		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
1521
1522done:
1523	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
1524				__func__, ch->id, ch);
1525	return;
1526}
1527
1528/**
1529 * Got initial data, check it. If OK,
1530 * notify device statemachine that we are up and
1531 * running.
1532 *
1533 * fi		An instance of a channel statemachine.
1534 * event	The event, just happened.
1535 * arg		Generic pointer, casted from channel * upon call.
1536 */
1537void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
1538{
1539	struct channel *ch = arg;
1540	struct net_device *dev = ch->netdev;
1541	struct ctcm_priv  *priv = dev->ml_priv;
1542	struct mpc_group  *grp = priv->mpcg;
1543	int rc;
1544	unsigned long saveflags = 0;	/* avoids compiler warning */
1545
1546	fsm_deltimer(&ch->timer);
1547	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
1548			__func__, ch->id, dev->name, smp_processor_id(),
1549				fsm_getstate(fi), fsm_getstate(grp->fsm));
1550
1551	fsm_newstate(fi, CTC_STATE_RXIDLE);
1552	/* XID processing complete */
1553
1554	switch (fsm_getstate(grp->fsm)) {
1555	case MPCG_STATE_FLOWC:
1556	case MPCG_STATE_READY:
1557		if (ctcm_checkalloc_buffer(ch))
1558				goto done;
1559		ch->trans_skb->data = ch->trans_skb_data;
1560		skb_reset_tail_pointer(ch->trans_skb);
1561		ch->trans_skb->len = 0;
1562		ch->ccw[1].count = ch->max_bufsize;
1563		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1564		if (event == CTC_EVENT_START)
1565			/* see remark about conditional locking */
1566			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1567		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1568						(unsigned long)ch, 0xff, 0);
1569		if (event == CTC_EVENT_START)
1570			spin_unlock_irqrestore(
1571					get_ccwdev_lock(ch->cdev), saveflags);
1572		if (rc != 0) {
1573			fsm_newstate(fi, CTC_STATE_RXINIT);
1574			ctcm_ccw_check_rc(ch, rc, "initial RX");
1575				goto done;
1576		}
1577		break;
1578	default:
1579		break;
1580	}
1581
1582	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
1583done:
1584	return;
1585}
1586
1587/*
1588 * ctcmpc channel FSM action
1589 * called from several points in ctcmpc_ch_fsm
1590 * ctcmpc only
1591 */
1592static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
1593{
1594	struct channel	  *ch     = arg;
1595	struct net_device *dev    = ch->netdev;
1596	struct ctcm_priv  *priv   = dev->ml_priv;
1597	struct mpc_group  *grp = priv->mpcg;
1598
1599	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
1600		__func__, dev->name, ch->id, ch, smp_processor_id(),
1601			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1602
1603	switch (fsm_getstate(grp->fsm)) {
1604	case MPCG_STATE_XID2INITW:
1605		/* ok..start yside xid exchanges */
1606		if (!ch->in_mpcgroup)
1607			break;
1608		if (fsm_getstate(ch->fsm) ==  CH_XID0_PENDING) {
1609			fsm_deltimer(&grp->timer);
1610			fsm_addtimer(&grp->timer,
1611				MPC_XID_TIMEOUT_VALUE,
1612				MPCG_EVENT_TIME

Large files are truncated; the remainder of this file is not shown here.