PageRenderTime 6ms CodeModel.GetById 43ms app.highlight 72ms RepoModel.GetById 1ms app.codeStats 0ms

/drivers/scsi/aacraid/src.c

http://github.com/mirrors/linux
C | 1425 lines | 978 code | 176 blank | 271 comment | 190 complexity | ff56e0c2fbdada952363f815afd18e48 MD5 | raw file
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Adaptec AAC series RAID controller driver
   4 *	(c) Copyright 2001 Red Hat Inc.
   5 *
   6 * based on the old aacraid driver that is..
   7 * Adaptec aacraid device driver for Linux.
   8 *
   9 * Copyright (c) 2000-2010 Adaptec, Inc.
  10 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  11 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  12 *
  13 * Module Name:
  14 *  src.c
  15 *
  16 * Abstract: Hardware Device Interface for PMC SRC based controllers
  17 */
  18
  19#include <linux/kernel.h>
  20#include <linux/init.h>
  21#include <linux/types.h>
  22#include <linux/pci.h>
  23#include <linux/spinlock.h>
  24#include <linux/slab.h>
  25#include <linux/blkdev.h>
  26#include <linux/delay.h>
  27#include <linux/completion.h>
  28#include <linux/time.h>
  29#include <linux/interrupt.h>
  30#include <scsi/scsi_host.h>
  31
  32#include "aacraid.h"
  33
  34static int aac_src_get_sync_status(struct aac_dev *dev);
  35
/*
 * aac_src_intr_message - interrupt handler for SRC-based controllers
 * @irq:    interrupt number (unused; context comes from @dev_id)
 * @dev_id: per-vector context (struct aac_msix_ctx) carrying the adapter
 *          and the MSI-X vector number this handler services
 *
 * Decodes the outbound doorbell into an interrupt mode (AIF event,
 * synchronous-command completion, and/or normal response traffic),
 * handles each class in turn, then drains this vector's slice of the
 * host response ring (host_rrq).  Always returns IRQ_HANDLED.
 */
static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		/* Only vector 0 carries AIF/sync doorbell notifications */
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			bellbits = PmDoorBellResponseSent;
			/* ack the doorbell; read back to flush the posted write */
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		int send_it = 0;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}

		if (dev->sync_fib) {
			/* complete the in-flight synchronous FIB ... */
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				complete(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
						sflags);
			/* ... then dequeue and send the next queued sync FIB, if any */
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib,
							   fiblink);
				list_del(entry);
				send_it = 1;
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (send_it) {
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
			}
		}
		if (!dev->msi_enabled)
			mode = 0;

	}

	if (mode & AAC_INT_MODE_AIF) {
		/* handle AIF */
		if (dev->sa_firmware) {
			u32 events = src_readl(dev, MUnit.SCR0);

			aac_intr_normal(dev, events, 1, 0, NULL);
			/* echo the event back and ring the inbound doorbell (bit 23) */
			writel(events, &dev->IndexRegs->Mailbox[0]);
			src_writel(dev, MUnit.IDR, 1 << 23);
		} else {
			if (dev->aif_thread && dev->fsa_dev)
				aac_intr_normal(dev, 0, 2, 0, NULL);
		}
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	/* Normal response traffic: drain this vector's response ring slice */
	if (mode) {
		index = dev->host_rrq_idx[vector_no];

		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = le32_to_cpu((dev->host_rrq[index])
				& 0x7fffffff);
			/* check fast response bits (30, 1) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;	/* empty slot - ring drained */
			handle >>= 2;	/* low 2 bits are flag bits, not part of the index */
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
			dev->host_rrq[index++] = 0;	/* mark slot consumed */
			/* wrap within this vector's slice of the ring */
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
		mode = 0;
	}

	return IRQ_HANDLED;
}
 168
 169/**
 170 *	aac_src_disable_interrupt	-	Disable interrupts
 171 *	@dev: Adapter
 172 */
 173
 174static void aac_src_disable_interrupt(struct aac_dev *dev)
 175{
 176	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
 177}
 178
/**
 *	aac_src_enable_interrupt_message	-	Enable interrupts
 *	@dev: Adapter
 *
 *	Enables message-mode interrupts via the device-register access helper.
 */

static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}
 188
/**
 *	src_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@p2: second parameter
 *	@p3: third parameter
 *	@p4: fourth parameter
 *	@p5: fifth parameter (accepted but not written to a mailbox here)
 *	@p6: sixth parameter (accepted but not written to a mailbox here)
 *	@status: adapter status from Mailbox 0 (may be NULL)
 *	@r1: return value from Mailbox 1 (may be NULL)
 *	@r2: return value from Mailbox 2 (may be NULL)
 *	@r3: return value from Mailbox 3 (may be NULL)
 *	@r4: return value from Mailbox 4 (may be NULL)
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its	completion.
 *
 *	Returns 0 on success (or when posting without waiting in sync/soft
 *	reset mode), -ETIMEDOUT if the completion doorbell never arrives.
 */

static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 4.
	 *	NOTE(review): p5 and p6 are accepted for interface
	 *	compatibility but are never written to Mailboxes 5/6 here.
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev,
			   MUnit.ODR_C,
			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	/* In sync mode (for SEND_SYNCHRONOUS_FIB) or during soft reset the
	 * command is only posted; completion is handled elsewhere. */
	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
		!dev->in_soft_reset) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 *	Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev,
						MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 *	Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		/* firmware reports its supported MSI-X vector count in Mailbox 5 */
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 *	Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev,
				MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}
 318
/**
 *	aac_src_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it: issues a
 *	synchronous BREAKPOINT_REQUEST with no parameters and discards
 *	all return values.
 */

static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}
 332
 333/**
 334 *	aac_src_notify_adapter		-	send an event to the adapter
 335 *	@dev: Adapter
 336 *	@event: Event to send
 337 *
 338 *	Notify the i960 that something it probably cares about has
 339 *	happened.
 340 */
 341
 342static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
 343{
 344	switch (event) {
 345
 346	case AdapNormCmdQue:
 347		src_writel(dev, MUnit.ODR_C,
 348			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
 349		break;
 350	case HostNormRespNotFull:
 351		src_writel(dev, MUnit.ODR_C,
 352			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
 353		break;
 354	case AdapNormRespQue:
 355		src_writel(dev, MUnit.ODR_C,
 356			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
 357		break;
 358	case HostNormCmdNotFull:
 359		src_writel(dev, MUnit.ODR_C,
 360			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
 361		break;
 362	case FastIo:
 363		src_writel(dev, MUnit.ODR_C,
 364			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
 365		break;
 366	case AdapPrintfDone:
 367		src_writel(dev, MUnit.ODR_C,
 368			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
 369		break;
 370	default:
 371		BUG();
 372		break;
 373	}
 374}
 375
/**
 *	aac_src_start_adapter		-	activate adapter
 *	@dev:	Adapter
 *
 *	Start up processing on an i960 based AAC adapter: resets the
 *	per-vector response-ring indices and counters, stamps the init
 *	structure with the current wall-clock time, then hands the init
 *	structure's DMA address to the firmware via a synchronous
 *	INIT_STRUCT_BASE_ADDRESS command.
 */

static void aac_src_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;
	int i;

	 /* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	atomic_set(&dev->msix_counter, 0);
	dev->fibs_pushed_no = 0;

	init = dev->init;
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		/* TYPE3 (r8) init struct: 64-bit address plus total size
		 * including the trailing per-HRRQ descriptors */
		init->r8.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
			sizeof(struct _r8) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
	} else {
		init->r7.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		// We can only use a 32 bit address here
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
	}

}
 416
 417/**
 418 *	aac_src_check_health
 419 *	@dev: device to check if healthy
 420 *
 421 *	Will attempt to determine if the specified adapter is alive and
 422 *	capable of handling requests, returning 0 if alive.
 423 */
 424static int aac_src_check_health(struct aac_dev *dev)
 425{
 426	u32 status = src_readl(dev, MUnit.OMR);
 427
 428	/*
 429	 *	Check to see if the board panic'd.
 430	 */
 431	if (unlikely(status & KERNEL_PANIC))
 432		goto err_blink;
 433
 434	/*
 435	 *	Check to see if the board failed any self tests.
 436	 */
 437	if (unlikely(status & SELF_TEST_FAILED))
 438		goto err_out;
 439
 440	/*
 441	 *	Check to see if the board failed any self tests.
 442	 */
 443	if (unlikely(status & MONITOR_PANIC))
 444		goto err_out;
 445
 446	/*
 447	 *	Wait for the adapter to be up and running.
 448	 */
 449	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
 450		return -3;
 451	/*
 452	 *	Everything is OK
 453	 */
 454	return 0;
 455
 456err_out:
 457	return -1;
 458
 459err_blink:
 460	return (status >> 16) & 0xFF;
 461}
 462
/* Round-robin MSI-X vector selection: atomically bump the counter and
 * fold it into the number of available vectors. */
static inline u32 aac_get_vector(struct aac_dev *dev)
{
	return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
}
 467
/**
 *	aac_src_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful (this function always
 *	returns 0).  Picks a reply vector, encodes it into the request,
 *	encodes the request size into the low bits of the DMA address and
 *	writes the address to the appropriate inbound queue register.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	int native_hba;
#if !defined(writeq)
	/* without a 64-bit writeq the two 32-bit writes must be atomic */
	unsigned long flags;
#endif

	u16 vector_no;

	atomic_inc(&q->numpending);

	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;


	/* Choose the reply vector: round-robin on TYPE3/SA firmware,
	 * otherwise the vector pre-assigned to this FIB.  AIF requests and
	 * single-vector setups always use vector 0. */
	if (dev->msi_enabled && dev->max_msix > 1 &&
		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {

		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
			&& dev->sa_firmware)
			vector_no = aac_get_vector(dev);
		else
			vector_no = fib->vector_no;

		if (native_hba) {
			/* Native HBA requests carry the reply queue id and
			 * fold the vector into the high 16 bits of request_id */
			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
				struct aac_hba_tm_req *tm_req;

				tm_req = (struct aac_hba_tm_req *)
						fib->hw_fib_va;
				if (tm_req->iu_type ==
					HBA_IU_TYPE_SCSI_TM_REQ) {
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				} else {
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				}
			} else {
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->reply_qid
						= vector_no;
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->request_id
						+= (vector_no << 16);
			}
		} else {
			/* legacy FIB: vector goes into the handle's high 16 bits */
			fib->hw_fib_va->header.Handle += (vector_no << 16);
		}
	} else {
		vector_no = 0;
	}

	atomic_inc(&dev->rrq_outstanding[vector_no]);

	if (native_hba) {
		/* size is encoded in the low address bits as
		 * (bytes rounded up to 128) / 128 - 1, capped at 31 */
		address = fib->hw_fib_pa;
		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;
#if defined(writeq)
		src_writeq(dev, MUnit.IQN_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQN_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	} else {
		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
			/* Calculate the amount to the fibsize bits */
			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* New FIB header, 32-bit */
			address = fib->hw_fib_pa;
			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
			fib->hw_fib_va->header.SenderFibAddress =
				cpu_to_le32((u32)address);
			fib->hw_fib_va->header.u.TimeStamp = 0;
			/* FIB_MAGIC2 carries only a 32-bit sender address */
			WARN_ON(upper_32_bits(address) != 0L);
		} else {
			/* Calculate the amount to the fibsize bits */
			fibsize = (sizeof(struct aac_fib_xporthdr) +
				le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* Fill XPORT header (located immediately before the FIB) */
			pFibX = (struct aac_fib_xporthdr *)
				((unsigned char *)fib->hw_fib_va -
				sizeof(struct aac_fib_xporthdr));
			pFibX->Handle = fib->hw_fib_va->header.Handle;
			pFibX->HostAddress =
				cpu_to_le64((u64)fib->hw_fib_pa);
			pFibX->Size = cpu_to_le32(
				le16_to_cpu(fib->hw_fib_va->header.Size));
			address = fib->hw_fib_pa -
				(u64)sizeof(struct aac_fib_xporthdr);
		}
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;

#if defined(writeq)
		src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQ_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	}
	return 0;
}
 602
/**
 *	aac_src_ioremap
 *	@dev: Adapter
 *	@size: mapping resize request; 0 means unmap everything
 *
 *	Maps BAR1 (debug/index area) and BAR0 (register space) for an SRC
 *	("tupelo") controller, or tears both mappings down when @size is 0.
 *	Returns 0 on success, -1 on mapping failure.
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		/* unmap in reverse order of creation */
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
		AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		/* roll back the BAR1 mapping on failure */
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}
 632
 633/**
 634 *  aac_srcv_ioremap
 635 *	@size: mapping resize request
 636 *
 637 */
 638static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
 639{
 640	if (!size) {
 641		iounmap(dev->regs.src.bar0);
 642		dev->base = dev->regs.src.bar0 = NULL;
 643		return 0;
 644	}
 645
 646	dev->regs.src.bar1 =
 647	ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
 648	dev->base = NULL;
 649	if (dev->regs.src.bar1 == NULL)
 650		return -1;
 651	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
 652	if (dev->base == NULL) {
 653		iounmap(dev->regs.src.bar1);
 654		dev->regs.src.bar1 = NULL;
 655		return -1;
 656	}
 657	dev->IndexRegs = &((struct src_registers __iomem *)
 658		dev->base)->u.denali.IndexRegs;
 659	return 0;
 660}
 661
 662void aac_set_intx_mode(struct aac_dev *dev)
 663{
 664	if (dev->msi_enabled) {
 665		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
 666		dev->msi_enabled = 0;
 667		msleep(5000); /* Delay 5 seconds */
 668	}
 669}
 670
 671static void aac_clear_omr(struct aac_dev *dev)
 672{
 673	u32 omr_value = 0;
 674
 675	omr_value = src_readl(dev, MUnit.OMR);
 676
 677	/*
 678	 * Check for PCI Errors or Kernel Panic
 679	 */
 680	if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
 681		omr_value = 0;
 682
 683	/*
 684	 * Preserve MSIX Value if any
 685	 */
 686	src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
 687	src_readl(dev, MUnit.OMR);
 688}
 689
/*
 * Ask the firmware to dump its FIBs before an IOP reset, if both the
 * aac_fib_dump module option and the firmware capability bit allow it.
 */
static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
{
	__le32 supported_options3;

	if (!aac_fib_dump)
		return;

	/* NOTE(review): supported_options3 is __le32 and is masked against
	 * AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP without a le32_to_cpu
	 * conversion - presumably the option constant is defined in
	 * little-endian form; verify against aacraid.h. */
	supported_options3  = dev->supplement_adapter_info.supported_options3;
	if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
		return;

	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
			0, 0, 0,  0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
 704
 705static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
 706{
 707	bool ctrl_up = true;
 708	unsigned long status, start;
 709	bool is_up = false;
 710
 711	start = jiffies;
 712	do {
 713		schedule();
 714		status = src_readl(dev, MUnit.OMR);
 715
 716		if (status == 0xffffffff)
 717			status = 0;
 718
 719		if (status & KERNEL_BOOTING) {
 720			start = jiffies;
 721			continue;
 722		}
 723
 724		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
 725			ctrl_up = false;
 726			break;
 727		}
 728
 729		is_up = status & KERNEL_UP_AND_RUNNING;
 730
 731	} while (!is_up);
 732
 733	return ctrl_up;
 734}
 735
/*
 * Tell the firmware to drop all outstanding I/O before a reset.
 * Only issued when the adapter advertises soft-reset support.
 */
static void aac_src_drop_io(struct aac_dev *dev)
{
	if (!dev->soft_reset_support)
		return;

	aac_adapter_sync_cmd(dev, DROP_IO,
			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
 744
/*
 * Warn the firmware that an IOP reset is imminent and ask it to drop
 * any outstanding I/O.
 */
static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
						NULL, NULL, NULL, NULL);
	aac_src_drop_io(dev);
}
 751
/*
 * Perform the full IOP reset sequence: optional firmware FIB dump,
 * notify firmware, fall back to INTx, clear the OMR, ring the reset
 * doorbell and give the adapter 5 seconds to restart.
 */
static void aac_send_iop_reset(struct aac_dev *dev)
{
	aac_dump_fw_fib_iop_reset(dev);

	aac_notify_fw_of_iop_reset(dev);

	aac_set_intx_mode(dev);

	aac_clear_omr(dev);

	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);

	/* allow the adapter time to come back after the reset doorbell */
	msleep(5000);
}
 766
 767static void aac_send_hardware_soft_reset(struct aac_dev *dev)
 768{
 769	u_int32_t val;
 770
 771	aac_clear_omr(dev);
 772	val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
 773	val |= 0x01;
 774	writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
 775	msleep_interruptible(20000);
 776}
 777
 778static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
 779{
 780	bool is_ctrl_up;
 781	int ret = 0;
 782
 783	if (bled < 0)
 784		goto invalid_out;
 785
 786	if (bled)
 787		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);
 788
 789	/*
 790	 * When there is a BlinkLED, IOP_RESET has not effect
 791	 */
 792	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
 793		reset_type &= ~HW_IOP_RESET;
 794
 795	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
 796
 797	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);
 798
 799	if (reset_type & HW_IOP_RESET) {
 800		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
 801		aac_send_iop_reset(dev);
 802
 803		/*
 804		 * Creates a delay or wait till up and running comes thru
 805		 */
 806		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
 807		if (!is_ctrl_up)
 808			dev_err(&dev->pdev->dev, "IOP reset failed\n");
 809		else {
 810			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
 811			goto set_startup;
 812		}
 813	}
 814
 815	if (!dev->sa_firmware) {
 816		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
 817		ret = -ENODEV;
 818		goto out;
 819	}
 820
 821	if (reset_type & HW_SOFT_RESET) {
 822		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
 823		aac_send_hardware_soft_reset(dev);
 824		dev->msi_enabled = 0;
 825
 826		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
 827		if (!is_ctrl_up) {
 828			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
 829			ret = -ENODEV;
 830			goto out;
 831		} else
 832			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
 833	}
 834
 835set_startup:
 836	if (startup_timeout < 300)
 837		startup_timeout = 300;
 838
 839out:
 840	return ret;
 841
 842invalid_out:
 843	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
 844		ret = -ENODEV;
 845goto out;
 846}
 847
 848/**
 849 *	aac_src_select_comm	-	Select communications method
 850 *	@dev: Adapter
 851 *	@comm: communications method
 852 */
 853static int aac_src_select_comm(struct aac_dev *dev, int comm)
 854{
 855	switch (comm) {
 856	case AAC_COMM_MESSAGE:
 857		dev->a_ops.adapter_intr = aac_src_intr_message;
 858		dev->a_ops.adapter_deliver = aac_src_deliver_message;
 859		break;
 860	default:
 861		return 1;
 862	}
 863	return 0;
 864}
 865
/**
 *  aac_src_init	-	initialize an Cardinal Frey Bar card
 *  @dev: device to configure
 *
 *  Maps the register space, recovers the adapter from any boot-time
 *  panic, waits for the firmware to come up, fills in the function
 *  dispatch table, sets up interrupts and finally tells the adapter to
 *  start accepting requests.  Returns 0 on success, -1 on failure.
 */

int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
			++restart;
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		/* one restart attempt is allowed before the full timeout:
		 * try it when a fault is reported or the shorter window expires */
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0]))  < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}
1015
/*
 * aac_src_wait_sync - wait for a previously posted sync command
 * @dev:    Adapter
 * @status: optional array of 5 ints filled with Mailboxes 0-4 on success
 *
 * Polls the sync-status doorbell for up to 5 seconds.  Returns 0 when
 * the command completed (doorbell seen and acknowledged), 1 on timeout.
 */
static int aac_src_wait_sync(struct aac_dev *dev, int *status)
{
	unsigned long start = jiffies;
	unsigned long usecs = 0;
	int delay = 5 * HZ;
	int rc = 1;

	while (time_before(jiffies, start+delay)) {
		/*
		 * Delay 5 microseconds to let Mon960 get info.
		 */
		udelay(5);

		/*
		 * Mon960 will set doorbell0 bit when it has completed the
		 * command.
		 */
		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear the doorbell.
			 */
			if (dev->msi_enabled)
				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
			else
				src_writel(dev, MUnit.ODR_C,
					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
			rc = 0;

			break;
		}

		/*
		 * Yield the processor in case we are slow
		 */
		usecs = 1 * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	}
	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status && !rc) {
		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
	}

	return rc;
}
1066
1067/**
1068 *  aac_src_soft_reset	-	perform soft reset to speed up
1069 *  access
1070 *
1071 *  Assumptions: That the controller is in a state where we can
1072 *  bring it back to life with an init struct. We can only use
1073 *  fast sync commands, as the timeout is 5 seconds.
1074 *
1075 *  @dev: device to configure
1076 *
1077 */
1078
1079static int aac_src_soft_reset(struct aac_dev *dev)
1080{
1081	u32 status_omr = src_readl(dev, MUnit.OMR);
1082	u32 status[5];
1083	int rc = 1;
1084	int state = 0;
1085	char *state_str[7] = {
1086		"GET_ADAPTER_PROPERTIES Failed",
1087		"GET_ADAPTER_PROPERTIES timeout",
1088		"SOFT_RESET not supported",
1089		"DROP_IO Failed",
1090		"DROP_IO timeout",
1091		"Check Health failed"
1092	};
1093
1094	if (status_omr == INVALID_OMR)
1095		return 1;       // pcie hosed
1096
1097	if (!(status_omr & KERNEL_UP_AND_RUNNING))
1098		return 1;       // not up and running
1099
1100	/*
1101	 * We go into soft reset mode to allow us to handle response
1102	 */
1103	dev->in_soft_reset = 1;
1104	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;
1105
1106	/* Get adapter properties */
1107	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
1108		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
1109	if (rc)
1110		goto out;
1111
1112	state++;
1113	if (aac_src_wait_sync(dev, status)) {
1114		rc = 1;
1115		goto out;
1116	}
1117
1118	state++;
1119	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
1120		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
1121		rc = 2;
1122		goto out;
1123	}
1124
1125	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
1126		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
1127		dev->sa_firmware = 1;
1128
1129	state++;
1130	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
1131		 status+0, status+1, status+2, status+3, status+4);
1132
1133	if (rc)
1134		goto out;
1135
1136	state++;
1137	if (aac_src_wait_sync(dev, status)) {
1138		rc = 3;
1139		goto out;
1140	}
1141
1142	if (status[1])
1143		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
1144			__func__, status[1]);
1145
1146	state++;
1147	rc = aac_src_check_health(dev);
1148
1149out:
1150	dev->in_soft_reset = 0;
1151	dev->msi_enabled = 0;
1152	if (rc)
1153		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
1154			state_str[state], rc);
1155
1156	return rc;
1157}
1158/**
1159 *  aac_srcv_init	-	initialize an SRCv card
1160 *  @dev: device to configure
1161 *
1162 */
1163
1164int aac_srcv_init(struct aac_dev *dev)
1165{
1166	unsigned long start;
1167	unsigned long status;
1168	int restart = 0;
1169	int instance = dev->id;
1170	const char *name = dev->name;
1171
1172	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
1173	dev->a_ops.adapter_comm = aac_src_select_comm;
1174
1175	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
1176	if (aac_adapter_ioremap(dev, dev->base_size)) {
1177		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
1178		goto error_iounmap;
1179	}
1180
1181	/* Failure to reset here is an option ... */
1182	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
1183	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
1184
1185	if (dev->init_reset) {
1186		dev->init_reset = false;
1187		if (aac_src_soft_reset(dev)) {
1188			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
1189			++restart;
1190		}
1191	}
1192
1193	/*
1194	 *	Check to see if flash update is running.
1195	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
1196	 */
1197	status = src_readl(dev, MUnit.OMR);
1198	if (status & FLASH_UPD_PENDING) {
1199		start = jiffies;
1200		do {
1201			status = src_readl(dev, MUnit.OMR);
1202			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
1203				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
1204					dev->name, instance);
1205				goto error_iounmap;
1206			}
1207		} while (!(status & FLASH_UPD_SUCCESS) &&
1208			 !(status & FLASH_UPD_FAILED));
1209		/* Delay 10 seconds.
1210		 * Because right now FW is doing a soft reset,
1211		 * do not read scratch pad register at this time
1212		 */
1213		ssleep(10);
1214	}
1215	/*
1216	 *	Check to see if the board panic'd while booting.
1217	 */
1218	status = src_readl(dev, MUnit.OMR);
1219	if (status & KERNEL_PANIC) {
1220		if (aac_src_restart_adapter(dev,
1221			aac_src_check_health(dev), IOP_HWSOFT_RESET))
1222			goto error_iounmap;
1223		++restart;
1224	}
1225	/*
1226	 *	Check to see if the board failed any self tests.
1227	 */
1228	status = src_readl(dev, MUnit.OMR);
1229	if (status & SELF_TEST_FAILED) {
1230		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
1231		goto error_iounmap;
1232	}
1233	/*
1234	 *	Check to see if the monitor panic'd while booting.
1235	 */
1236	if (status & MONITOR_PANIC) {
1237		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
1238		goto error_iounmap;
1239	}
1240
1241	start = jiffies;
1242	/*
1243	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
1244	 */
1245	do {
1246		status = src_readl(dev, MUnit.OMR);
1247		if (status == INVALID_OMR)
1248			status = 0;
1249
1250		if ((restart &&
1251		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
1252		  time_after(jiffies, start+HZ*startup_timeout)) {
1253			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
1254					dev->name, instance, status);
1255			goto error_iounmap;
1256		}
1257		if (!restart &&
1258		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
1259		  time_after(jiffies, start + HZ *
1260		  ((startup_timeout > 60)
1261		    ? (startup_timeout - 60)
1262		    : (startup_timeout / 2))))) {
1263			if (likely(!aac_src_restart_adapter(dev,
1264				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
1265				start = jiffies;
1266			++restart;
1267		}
1268		msleep(1);
1269	} while (!(status & KERNEL_UP_AND_RUNNING));
1270
1271	if (restart && aac_commit)
1272		aac_commit = 1;
1273	/*
1274	 *	Fill in the common function dispatch table.
1275	 */
1276	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
1277	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
1278	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
1279	dev->a_ops.adapter_notify = aac_src_notify_adapter;
1280	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
1281	dev->a_ops.adapter_check_health = aac_src_check_health;
1282	dev->a_ops.adapter_restart = aac_src_restart_adapter;
1283	dev->a_ops.adapter_start = aac_src_start_adapter;
1284
1285	/*
1286	 *	First clear out all interrupts.  Then enable the one's that we
1287	 *	can handle.
1288	 */
1289	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
1290	aac_adapter_disable_int(dev);
1291	src_writel(dev, MUnit.ODR_C, 0xffffffff);
1292	aac_adapter_enable_int(dev);
1293
1294	if (aac_init_adapter(dev) == NULL)
1295		goto error_iounmap;
1296	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
1297		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
1298		goto error_iounmap;
1299	if (dev->msi_enabled)
1300		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
1301
1302	if (aac_acquire_irq(dev))
1303		goto error_iounmap;
1304
1305	dev->dbg_base = pci_resource_start(dev->pdev, 2);
1306	dev->dbg_base_mapped = dev->regs.src.bar1;
1307	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
1308	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
1309
1310	aac_adapter_enable_int(dev);
1311
1312	if (!dev->sync_mode) {
1313		/*
1314		 * Tell the adapter that all is configured, and it can
1315		 * start accepting requests
1316		 */
1317		aac_src_start_adapter(dev);
1318	}
1319	return 0;
1320
1321error_iounmap:
1322
1323	return -1;
1324}
1325
/**
 *  aac_src_access_devreg	-	perform one interrupt/doorbell register operation
 *  @dev: adapter whose MUnit registers are programmed
 *  @mode: which AAC_* operation to perform (see cases below)
 *
 *  Each case is a fixed read-modify-write sequence on the MUnit
 *  registers; the discarded src_readl() after a write appears to be a
 *  read-back of the register just written (NOTE(review): presumably to
 *  flush the posted write - confirm against the SRC hardware spec).
 *  Unknown modes are silently ignored.
 */
void aac_src_access_devreg(struct aac_dev *dev, int mode)
{
	u_int32_t val;

	switch (mode) {
	case AAC_ENABLE_INTERRUPT:
		/* Unmask outbound interrupts; cache the mask in dev->OIMR */
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = (dev->msi_enabled ?
					AAC_INT_ENABLE_TYPE1_MSIX :
					AAC_INT_ENABLE_TYPE1_INTX));
		break;

	case AAC_DISABLE_INTERRUPT:
		/* Mask all outbound interrupts */
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = AAC_INT_DISABLE_ALL);
		break;

	case AAC_ENABLE_MSIX:
		/* set bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x40;
		src_writel(dev,  MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev,
			   MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
		break;

	case AAC_DISABLE_MSIX:
		/* reset bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val &= ~0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_AIF_BIT:
		/* set bit 5 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x20;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_SYNC_BIT:
		/* set bit 4 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x10;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_ENABLE_INTX:
		/* set bit 7 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x80;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		src_readl(dev, MUnit.IOAR);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev, MUnit.OIMR,
				val & (~(PMC_GLOBAL_INT_BIT2)));
		break;

	default:
		break;
	}
}
1403
1404static int aac_src_get_sync_status(struct aac_dev *dev)
1405{
1406	int msix_val = 0;
1407	int legacy_val = 0;
1408
1409	msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;
1410
1411	if (!dev->msi_enabled) {
1412		/*
1413		 * if Legacy int status indicates cmd is not complete
1414		 * sample MSIx register to see if it indiactes cmd complete,
1415		 * if yes set the controller in MSIx mode and consider cmd
1416		 * completed
1417		 */
1418		legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
1419		if (!(legacy_val & 1) && msix_val)
1420			dev->msi_enabled = 1;
1421		return legacy_val;
1422	}
1423
1424	return msix_val;
1425}