/*
 * drivers/scsi/aacraid/src.c
 * (mirrored from github.com/mirrors/linux; 1425 lines: 978 code,
 *  176 blank, 271 comment)
 */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Adaptec AAC series RAID controller driver
  4. * (c) Copyright 2001 Red Hat Inc.
  5. *
  6. * based on the old aacraid driver that is..
  7. * Adaptec aacraid device driver for Linux.
  8. *
  9. * Copyright (c) 2000-2010 Adaptec, Inc.
  10. * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  11. * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  12. *
  13. * Module Name:
  14. * src.c
  15. *
  16. * Abstract: Hardware Device Interface for PMC SRC based controllers
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/init.h>
  20. #include <linux/types.h>
  21. #include <linux/pci.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/slab.h>
  24. #include <linux/blkdev.h>
  25. #include <linux/delay.h>
  26. #include <linux/completion.h>
  27. #include <linux/time.h>
  28. #include <linux/interrupt.h>
  29. #include <scsi/scsi_host.h>
  30. #include "aacraid.h"
  31. static int aac_src_get_sync_status(struct aac_dev *dev);
/*
 * aac_src_intr_message - interrupt handler for SRC-based adapters
 * @irq:    interrupt number (unused; context comes from @dev_id)
 * @dev_id: per-vector &struct aac_msix_ctx registered with request_irq()
 *
 * Handles both legacy INTx and MSI/MSI-X interrupt modes.  Classifies the
 * interrupt cause into sync-command completion, AIF (adapter-initiated
 * FIB) notification, and normal response-queue work, then services each.
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		/*
		 * Only vector 0 carries AIF/sync doorbells; other vectors
		 * are pure response-queue interrupts.
		 */
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			/* Normal response: ack just the response doorbell. */
			bellbits = PmDoorBellResponseSent;
			src_writel(dev, MUnit.ODR_C, bellbits);
			/* readback flushes the posted clear */
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		int send_it = 0;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			/* ack the sync doorbell in INTx mode */
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}

		if (dev->sync_fib) {
			/* Complete the in-flight synchronous FIB ... */
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				complete(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
				sflags);
			/* ... then kick off the next queued sync FIB, if any. */
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib,
							   fiblink);
				list_del(entry);
				send_it = 1;
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (send_it) {
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
			}
		}
		/* In INTx mode a sync interrupt carries no rrq work. */
		if (!dev->msi_enabled)
			mode = 0;
	}

	if (mode & AAC_INT_MODE_AIF) {
		/* handle AIF */
		if (dev->sa_firmware) {
			/* SmartIOC firmware posts the event code in SCR0. */
			u32 events = src_readl(dev, MUnit.SCR0);

			aac_intr_normal(dev, events, 1, 0, NULL);
			/* echo the event back and ring the inbound doorbell */
			writel(events, &dev->IndexRegs->Mailbox[0]);
			src_writel(dev, MUnit.IDR, 1 << 23);
		} else {
			if (dev->aif_thread && dev->fsa_dev)
				aac_intr_normal(dev, 0, 2, 0, NULL);
		}
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	if (mode) {
		/* Drain this vector's slice of the host response ring. */
		index = dev->host_rrq_idx[vector_no];
		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = le32_to_cpu((dev->host_rrq[index])
				& 0x7fffffff);
			/* check fast response bits (30, 1) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			/* a zero entry marks the end of posted responses */
			if (handle == 0)
				break;
			handle >>= 2;
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
			/* consume the slot and wrap within this vector's slice */
			dev->host_rrq[index++] = 0;
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
		mode = 0;
	}

	return IRQ_HANDLED;
}
  152. /**
  153. * aac_src_disable_interrupt - Disable interrupts
  154. * @dev: Adapter
  155. */
  156. static void aac_src_disable_interrupt(struct aac_dev *dev)
  157. {
  158. src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
  159. }
  160. /**
  161. * aac_src_enable_interrupt_message - Enable interrupts
  162. * @dev: Adapter
  163. */
  164. static void aac_src_enable_interrupt_message(struct aac_dev *dev)
  165. {
  166. aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
  167. }
/**
 * src_sync_cmd - send a synchronous command and wait for completion
 * @dev:     Adapter
 * @command: Command to execute
 * @p1:      first parameter  (Mailbox 1)
 * @p2:      second parameter (Mailbox 2)
 * @p3:      third parameter  (Mailbox 3)
 * @p4:      fourth parameter (Mailbox 4)
 * @p5:      fifth parameter  (NOTE(review): accepted but never written to a
 *           mailbox by this routine — confirm against firmware interface)
 * @p6:      sixth parameter  (likewise unused here)
 * @status:  adapter status out (Mailbox 0), may be NULL
 * @r1:      result out (Mailbox 1), may be NULL
 * @r2:      result out (Mailbox 2), may be NULL
 * @r3:      result out (Mailbox 3), may be NULL
 * @r4:      result out (Mailbox 4), may be NULL
 *
 * This routine will send a synchronous command to the adapter and wait
 * for its completion.  Returns 0 on success, -ETIMEDOUT if the adapter
 * does not ring the completion doorbell in time (10 s for
 * IOP_RESET_ALWAYS, 5 minutes otherwise).
 */
static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 6
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev,
			   MUnit.ODR_C,
			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	/* In sync_mode the SEND_SYNCHRONOUS_FIB completion is handled by
	 * the interrupt path, so only poll for the other cases. */
	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
		!dev->in_soft_reset) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 *	Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev,
						MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 *	Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		/* firmware reports its supported MSI-X vector count here */
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 *	Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev,
				MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}
  288. /**
  289. * aac_src_interrupt_adapter - interrupt adapter
  290. * @dev: Adapter
  291. *
  292. * Send an interrupt to the i960 and breakpoint it.
  293. */
  294. static void aac_src_interrupt_adapter(struct aac_dev *dev)
  295. {
  296. src_sync_cmd(dev, BREAKPOINT_REQUEST,
  297. 0, 0, 0, 0, 0, 0,
  298. NULL, NULL, NULL, NULL, NULL);
  299. }
  300. /**
  301. * aac_src_notify_adapter - send an event to the adapter
  302. * @dev: Adapter
  303. * @event: Event to send
  304. *
  305. * Notify the i960 that something it probably cares about has
  306. * happened.
  307. */
  308. static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
  309. {
  310. switch (event) {
  311. case AdapNormCmdQue:
  312. src_writel(dev, MUnit.ODR_C,
  313. INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
  314. break;
  315. case HostNormRespNotFull:
  316. src_writel(dev, MUnit.ODR_C,
  317. INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
  318. break;
  319. case AdapNormRespQue:
  320. src_writel(dev, MUnit.ODR_C,
  321. INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
  322. break;
  323. case HostNormCmdNotFull:
  324. src_writel(dev, MUnit.ODR_C,
  325. INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
  326. break;
  327. case FastIo:
  328. src_writel(dev, MUnit.ODR_C,
  329. INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
  330. break;
  331. case AdapPrintfDone:
  332. src_writel(dev, MUnit.ODR_C,
  333. INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
  334. break;
  335. default:
  336. BUG();
  337. break;
  338. }
  339. }
/**
 * aac_src_start_adapter - activate adapter
 * @dev: Adapter
 *
 * Start up processing on an i960 based AAC adapter: reset the per-vector
 * response-ring indices and counters, stamp the init structure with the
 * current wall-clock time, and hand its physical address to the firmware
 * via INIT_STRUCT_BASE_ADDRESS.
 */
static void aac_src_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;
	int i;

	/* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	atomic_set(&dev->msix_counter, 0);
	dev->fibs_pushed_no = 0;
	init = dev->init;

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		/* TYPE3 (r8) init struct: 64-bit address, variable size
		 * depending on the number of host response rings. */
		init->r8.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
			sizeof(struct _r8) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
	} else {
		init->r7.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		// We can only use a 32 bit address here
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
	}
}
  376. /**
  377. * aac_src_check_health
  378. * @dev: device to check if healthy
  379. *
  380. * Will attempt to determine if the specified adapter is alive and
  381. * capable of handling requests, returning 0 if alive.
  382. */
  383. static int aac_src_check_health(struct aac_dev *dev)
  384. {
  385. u32 status = src_readl(dev, MUnit.OMR);
  386. /*
  387. * Check to see if the board panic'd.
  388. */
  389. if (unlikely(status & KERNEL_PANIC))
  390. goto err_blink;
  391. /*
  392. * Check to see if the board failed any self tests.
  393. */
  394. if (unlikely(status & SELF_TEST_FAILED))
  395. goto err_out;
  396. /*
  397. * Check to see if the board failed any self tests.
  398. */
  399. if (unlikely(status & MONITOR_PANIC))
  400. goto err_out;
  401. /*
  402. * Wait for the adapter to be up and running.
  403. */
  404. if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
  405. return -3;
  406. /*
  407. * Everything is OK
  408. */
  409. return 0;
  410. err_out:
  411. return -1;
  412. err_blink:
  413. return (status >> 16) & 0xFF;
  414. }
  415. static inline u32 aac_get_vector(struct aac_dev *dev)
  416. {
  417. return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
  418. }
/**
 * aac_src_deliver_message
 * @fib: fib to issue
 *
 * Will send a fib, returning 0 if successful.  Stamps the reply-queue
 * vector into the request, then posts the FIB's (physical address |
 * encoded size) to the appropriate inbound queue register — IQN for
 * native HBA commands, IQ for legacy FIBs.  Where no 64-bit writeq is
 * available, the two 32-bit halves are written under iq_lock.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	int native_hba;
#if !defined(writeq)
	unsigned long flags;
#endif

	u16 vector_no;

	atomic_inc(&q->numpending);

	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;

	/* Choose a reply vector; AIF requests always use vector 0. */
	if (dev->msi_enabled && dev->max_msix > 1 &&
		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {

		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
			&& dev->sa_firmware)
			vector_no = aac_get_vector(dev);
		else
			vector_no = fib->vector_no;

		if (native_hba) {
			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
				struct aac_hba_tm_req *tm_req;

				tm_req = (struct aac_hba_tm_req *)
						fib->hw_fib_va;
				/* TM and reset requests share the same
				 * reply_qid/request_id layout but are
				 * distinct structs. */
				if (tm_req->iu_type ==
						HBA_IU_TYPE_SCSI_TM_REQ) {
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				} else {
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				}
			} else {
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->reply_qid
						= vector_no;
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->request_id
						+= (vector_no << 16);
			}
		} else {
			/* legacy FIB: vector rides in the handle's top half */
			fib->hw_fib_va->header.Handle += (vector_no << 16);
		}
	} else {
		vector_no = 0;
	}

	atomic_inc(&dev->rrq_outstanding[vector_no]);

	if (native_hba) {
		/* Size is encoded in the low bits as 128-byte units - 1,
		 * capped at 31. */
		address = fib->hw_fib_pa;
		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;
#if defined(writeq)
		src_writeq(dev, MUnit.IQN_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQN_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	} else {
		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
			/* Calculate the amount to the fibsize bits */
			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* New FIB header, 32-bit */
			address = fib->hw_fib_pa;
			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
			fib->hw_fib_va->header.SenderFibAddress =
				cpu_to_le32((u32)address);
			fib->hw_fib_va->header.u.TimeStamp = 0;
			/* TYPE2/TYPE3 legacy path requires a 32-bit PA */
			WARN_ON(upper_32_bits(address) != 0L);
		} else {
			/* Calculate the amount to the fibsize bits */
			fibsize = (sizeof(struct aac_fib_xporthdr) +
				le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* Fill XPORT header */
			pFibX = (struct aac_fib_xporthdr *)
				((unsigned char *)fib->hw_fib_va -
				sizeof(struct aac_fib_xporthdr));
			pFibX->Handle = fib->hw_fib_va->header.Handle;
			pFibX->HostAddress =
				cpu_to_le64((u64)fib->hw_fib_pa);
			pFibX->Size = cpu_to_le32(
				le16_to_cpu(fib->hw_fib_va->header.Size));
			/* the xport header precedes the FIB in memory */
			address = fib->hw_fib_pa -
				(u64)sizeof(struct aac_fib_xporthdr);
		}
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;

#if defined(writeq)
		src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQ_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	}
	return 0;
}
  542. /**
  543. * aac_src_ioremap
  544. * @size: mapping resize request
  545. *
  546. */
  547. static int aac_src_ioremap(struct aac_dev *dev, u32 size)
  548. {
  549. if (!size) {
  550. iounmap(dev->regs.src.bar1);
  551. dev->regs.src.bar1 = NULL;
  552. iounmap(dev->regs.src.bar0);
  553. dev->base = dev->regs.src.bar0 = NULL;
  554. return 0;
  555. }
  556. dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
  557. AAC_MIN_SRC_BAR1_SIZE);
  558. dev->base = NULL;
  559. if (dev->regs.src.bar1 == NULL)
  560. return -1;
  561. dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
  562. if (dev->base == NULL) {
  563. iounmap(dev->regs.src.bar1);
  564. dev->regs.src.bar1 = NULL;
  565. return -1;
  566. }
  567. dev->IndexRegs = &((struct src_registers __iomem *)
  568. dev->base)->u.tupelo.IndexRegs;
  569. return 0;
  570. }
  571. /**
  572. * aac_srcv_ioremap
  573. * @size: mapping resize request
  574. *
  575. */
  576. static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
  577. {
  578. if (!size) {
  579. iounmap(dev->regs.src.bar0);
  580. dev->base = dev->regs.src.bar0 = NULL;
  581. return 0;
  582. }
  583. dev->regs.src.bar1 =
  584. ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
  585. dev->base = NULL;
  586. if (dev->regs.src.bar1 == NULL)
  587. return -1;
  588. dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
  589. if (dev->base == NULL) {
  590. iounmap(dev->regs.src.bar1);
  591. dev->regs.src.bar1 = NULL;
  592. return -1;
  593. }
  594. dev->IndexRegs = &((struct src_registers __iomem *)
  595. dev->base)->u.denali.IndexRegs;
  596. return 0;
  597. }
  598. void aac_set_intx_mode(struct aac_dev *dev)
  599. {
  600. if (dev->msi_enabled) {
  601. aac_src_access_devreg(dev, AAC_ENABLE_INTX);
  602. dev->msi_enabled = 0;
  603. msleep(5000); /* Delay 5 seconds */
  604. }
  605. }
  606. static void aac_clear_omr(struct aac_dev *dev)
  607. {
  608. u32 omr_value = 0;
  609. omr_value = src_readl(dev, MUnit.OMR);
  610. /*
  611. * Check for PCI Errors or Kernel Panic
  612. */
  613. if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
  614. omr_value = 0;
  615. /*
  616. * Preserve MSIX Value if any
  617. */
  618. src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
  619. src_readl(dev, MUnit.OMR);
  620. }
  621. static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
  622. {
  623. __le32 supported_options3;
  624. if (!aac_fib_dump)
  625. return;
  626. supported_options3 = dev->supplement_adapter_info.supported_options3;
  627. if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
  628. return;
  629. aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
  630. 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
  631. }
/*
 * aac_is_ctrl_up_and_running - poll for firmware readiness after a reset
 * @dev: Adapter
 *
 * Spins (yielding via schedule()) on the outbound message register until
 * KERNEL_UP_AND_RUNNING is set.  While KERNEL_BOOTING is reported the
 * timeout window is restarted, so a slow boot does not count against the
 * SOFT_RESET_TIME budget.
 *
 * Returns true when the controller came up, false on timeout.
 */
static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
{
	bool ctrl_up = true;
	unsigned long status, start;
	bool is_up = false;

	start = jiffies;
	do {
		schedule();
		status = src_readl(dev, MUnit.OMR);

		/* all-ones means the register read failed; treat as 0 */
		if (status == 0xffffffff)
			status = 0;

		if (status & KERNEL_BOOTING) {
			start = jiffies;	/* boot in progress: reset the clock */
			continue;
		}

		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
			ctrl_up = false;
			break;
		}

		is_up = status & KERNEL_UP_AND_RUNNING;

	} while (!is_up);

	return ctrl_up;
}
  655. static void aac_src_drop_io(struct aac_dev *dev)
  656. {
  657. if (!dev->soft_reset_support)
  658. return;
  659. aac_adapter_sync_cmd(dev, DROP_IO,
  660. 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
  661. }
  662. static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
  663. {
  664. aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
  665. NULL, NULL, NULL, NULL);
  666. aac_src_drop_io(dev);
  667. }
/*
 * aac_send_iop_reset - perform the full IOP reset sequence
 * @dev: Adapter
 *
 * Order matters here: dump firmware FIBs (if supported), notify firmware
 * of the reset, fall back to INTx interrupts, clear the OMR, and finally
 * trigger the reset through the inbound doorbell register.  The 5 s
 * sleep gives the controller time to come back.
 */
static void aac_send_iop_reset(struct aac_dev *dev)
{
	aac_dump_fw_fib_iop_reset(dev);

	aac_notify_fw_of_iop_reset(dev);

	aac_set_intx_mode(dev);

	aac_clear_omr(dev);

	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);

	msleep(5000);
}
  677. static void aac_send_hardware_soft_reset(struct aac_dev *dev)
  678. {
  679. u_int32_t val;
  680. aac_clear_omr(dev);
  681. val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
  682. val |= 0x01;
  683. writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
  684. msleep_interruptible(20000);
  685. }
/*
 * aac_src_restart_adapter - reset and restart an SRC adapter
 * @dev:        Adapter
 * @bled:       BlinkLED/panic code from a prior health check; < 0 means
 *              "check health only, don't reset"
 * @reset_type: bitmask of HW_IOP_RESET / HW_SOFT_RESET to attempt
 *
 * Tries an IOP reset first (unless a hard BlinkLED on sa_firmware rules
 * it out), falling back to a hardware soft reset on sa_firmware parts.
 * On any successful reset the global startup_timeout is raised to at
 * least 300 s.  Returns 0 on success, -ENODEV on failure.
 */
static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	bool is_ctrl_up;
	int ret = 0;

	if (bled < 0)
		goto invalid_out;

	if (bled)
		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);

	/*
	 * When there is a BlinkLED, IOP_RESET has not effect
	 */
	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
		reset_type &= ~HW_IOP_RESET;

	/* keep interrupts masked during the reset (enable == disable) */
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);

	if (reset_type & HW_IOP_RESET) {
		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
		aac_send_iop_reset(dev);

		/*
		 * Creates a delay or wait till up and running comes thru
		 */
		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up)
			dev_err(&dev->pdev->dev, "IOP reset failed\n");
		else {
			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
			goto set_startup;
		}
	}

	/* only sa_firmware parts support the hardware soft reset fallback */
	if (!dev->sa_firmware) {
		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
		ret = -ENODEV;
		goto out;
	}

	if (reset_type & HW_SOFT_RESET) {
		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
		aac_send_hardware_soft_reset(dev);
		dev->msi_enabled = 0;

		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up) {
			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
			ret = -ENODEV;
			goto out;
		} else
			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
	}

set_startup:
	if (startup_timeout < 300)
		startup_timeout = 300;

out:
	return ret;

invalid_out:
	/* health-check-only path: report failure if firmware panicked */
	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		ret = -ENODEV;
	goto out;
}
  742. /**
  743. * aac_src_select_comm - Select communications method
  744. * @dev: Adapter
  745. * @comm: communications method
  746. */
  747. static int aac_src_select_comm(struct aac_dev *dev, int comm)
  748. {
  749. switch (comm) {
  750. case AAC_COMM_MESSAGE:
  751. dev->a_ops.adapter_intr = aac_src_intr_message;
  752. dev->a_ops.adapter_deliver = aac_src_deliver_message;
  753. break;
  754. default:
  755. return 1;
  756. }
  757. return 0;
  758. }
/**
 * aac_src_init - initialize an Cardinal Frey Bar card
 * @dev: device to configure
 *
 * Maps the register BARs, waits for (and if necessary resets) the
 * firmware, fills in the adapter function dispatch table, sets up the
 * interrupt handler, and finally starts the adapter.
 *
 * Return: 0 on success, -1 on any failure.
 */
int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	/* keep interrupts masked until init completes (enable == disable) */
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
			++restart;
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}

	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}

	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}

	start = jiffies;

	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		/* If not yet restarted, try one restart before giving the
		 * firmware the remainder of the timeout window. */
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	/* NOTE(review): upstream guards this with `aac_commit < 0`; here a
	 * restart forces a commit whenever aac_commit is nonzero — verify
	 * against the module parameter's intended semantics. */
	if (restart && aac_commit)
		aac_commit = 1;

	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	/* real enable handler only after the IRQ is wired up */
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}
/*
 * aac_src_wait_sync - poll for completion of a fast sync command
 * @dev:    Adapter
 * @status: out array of at least 5 u32-sized slots, filled from
 *          Mailboxes 0-4 on success; may be NULL
 *
 * Waits up to 5 seconds for the firmware to ring OUTBOUNDDOORBELL_0,
 * clears the doorbell, and copies the mailbox results out.
 *
 * Returns 0 on completion, 1 on timeout.
 */
static int aac_src_wait_sync(struct aac_dev *dev, int *status)
{
	unsigned long start = jiffies;
	unsigned long usecs = 0;
	int delay = 5 * HZ;
	int rc = 1;

	while (time_before(jiffies, start+delay)) {
		/*
		 * Delay 5 microseconds to let Mon960 get info.
		 */
		udelay(5);

		/*
		 * Mon960 will set doorbell0 bit when it has completed the
		 * command.
		 */
		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear: the doorbell.
			 */
			if (dev->msi_enabled)
				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
			else
				src_writel(dev, MUnit.ODR_C,
					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
			rc = 0;

			break;
		}

		/*
		 * Yield the processor in case we are slow
		 */
		usecs = 1 * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	}

	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status && !rc) {
		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
	}

	return rc;
}
/**
 * aac_src_soft_reset - perform soft reset to speed up
 * access
 *
 * Assumptions: That the controller is in a state where we can
 * bring it back to life with an init struct. We can only use
 * fast sync commands, as the timeout is 5 seconds.
 *
 * @dev: device to configure
 *
 * Runs GET_ADAPTER_PROPERTIES and DROP_IO in soft-reset mode, detecting
 * sa_firmware along the way.  The @state counter indexes state_str[] so
 * a failure names the step that failed.  Returns 0 on success, nonzero
 * on failure (1-3 per step, or the health-check result).
 */
static int aac_src_soft_reset(struct aac_dev *dev)
{
	u32 status_omr = src_readl(dev, MUnit.OMR);
	u32 status[5];
	int rc = 1;
	int state = 0;
	char *state_str[7] = {
		"GET_ADAPTER_PROPERTIES Failed",
		"GET_ADAPTER_PROPERTIES timeout",
		"SOFT_RESET not supported",
		"DROP_IO Failed",
		"DROP_IO timeout",
		"Check Health failed"
	};

	if (status_omr == INVALID_OMR)
		return 1;       // pcie hosed

	if (!(status_omr & KERNEL_UP_AND_RUNNING))
		return 1;       // not up and running

	/*
	 * We go into soft reset mode to allow us to handle response
	 */
	dev->in_soft_reset = 1;
	/* mirror the firmware's current interrupt mode for this exchange */
	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;

	/* Get adapter properties */
	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 1;
		goto out;
	}

	state++;
	/* firmware must advertise extended options with soft reset */
	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
		rc = 2;
		goto out;
	}

	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
		dev->sa_firmware = 1;

	state++;
	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
		 status+0, status+1, status+2, status+3, status+4);
	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 3;
		goto out;
	}

	if (status[1])
		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
			__func__, status[1]);

	state++;
	rc = aac_src_check_health(dev);

out:
	dev->in_soft_reset = 0;
	dev->msi_enabled = 0;
	if (rc)
		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
			state_str[state], rc);

	return rc;
}
/**
 * aac_srcv_init - initialize an SRCv card
 * @dev: device to configure
 *
 * Brings the adapter to an operational state: maps BAR0, optionally
 * soft/hard resets the firmware, waits out any in-progress flash
 * update, waits for the firmware kernel to come up, wires the
 * adapter-ops dispatch table and enables interrupts.
 *
 * Return: 0 on success, -1 on any initialization failure.
 */
int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;		/* counts resets issued during bring-up */
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	/*
	 * NOTE(review): enable_int deliberately points at the *disable*
	 * routine at this stage, presumably to keep interrupts masked
	 * until setup completes (it is re-pointed at the real enable
	 * routine further down) — confirm against other src init paths.
	 */
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		/* Try the fast soft reset first; fall back to IOP hard reset */
		if (aac_src_soft_reset(dev)) {
			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
			++restart;
		}
	}

	/*
	 * Check to see if flash update is running.
	 * Wait for the adapter to be up and running. Wait up to 5 minutes
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			!(status & FLASH_UPD_FAILED));
		/* Delay 10 seconds.
		 * Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		ssleep(10);
	}

	/*
	 * Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}

	/*
	 * Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}

	/*
	 * Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}

	start = jiffies;
	/*
	 * Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	do {
		status = src_readl(dev, MUnit.OMR);
		if (status == INVALID_OMR)
			status = 0;	/* register unreadable: treat as not up yet */

		/* Already restarted once and still failing, or hard timeout */
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		/*
		 * First failure (or slow start): issue one restart and
		 * rebase the timeout clock so the restarted firmware
		 * gets a fresh window.
		 */
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
			    aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	} while (!(status & KERNEL_UP_AND_RUNNING));

	if (restart && aac_commit)
		aac_commit = 1;

	/*
	 * Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 * First clear out all interrupts. Then enable the one's that we
	 * can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);	/* clear all doorbells */
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	/* SRCv requires one of the message-type-2/3 comm interfaces */
	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
		goto error_iounmap;
	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	/* BAR2 doubles as the debug/diagnostic window */
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
	/* Now point enable_int at the real message-mode enable routine */
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}
  1162. void aac_src_access_devreg(struct aac_dev *dev, int mode)
  1163. {
  1164. u_int32_t val;
  1165. switch (mode) {
  1166. case AAC_ENABLE_INTERRUPT:
  1167. src_writel(dev,
  1168. MUnit.OIMR,
  1169. dev->OIMR = (dev->msi_enabled ?
  1170. AAC_INT_ENABLE_TYPE1_MSIX :
  1171. AAC_INT_ENABLE_TYPE1_INTX));
  1172. break;
  1173. case AAC_DISABLE_INTERRUPT:
  1174. src_writel(dev,
  1175. MUnit.OIMR,
  1176. dev->OIMR = AAC_INT_DISABLE_ALL);
  1177. break;
  1178. case AAC_ENABLE_MSIX:
  1179. /* set bit 6 */
  1180. val = src_readl(dev, MUnit.IDR);
  1181. val |= 0x40;
  1182. src_writel(dev, MUnit.IDR, val);
  1183. src_readl(dev, MUnit.IDR);
  1184. /* unmask int. */
  1185. val = PMC_ALL_INTERRUPT_BITS;
  1186. src_writel(dev, MUnit.IOAR, val);
  1187. val = src_readl(dev, MUnit.OIMR);
  1188. src_writel(dev,
  1189. MUnit.OIMR,
  1190. val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
  1191. break;
  1192. case AAC_DISABLE_MSIX:
  1193. /* reset bit 6 */
  1194. val = src_readl(dev, MUnit.IDR);
  1195. val &= ~0x40;
  1196. src_writel(dev, MUnit.IDR, val);
  1197. src_readl(dev, MUnit.IDR);
  1198. break;
  1199. case AAC_CLEAR_AIF_BIT:
  1200. /* set bit 5 */
  1201. val = src_readl(dev, MUnit.IDR);
  1202. val |= 0x20;
  1203. src_writel(dev, MUnit.IDR, val);
  1204. src_readl(dev, MUnit.IDR);
  1205. break;
  1206. case AAC_CLEAR_SYNC_BIT:
  1207. /* set bit 4 */
  1208. val = src_readl(dev, MUnit.IDR);
  1209. val |= 0x10;
  1210. src_writel(dev, MUnit.IDR, val);
  1211. src_readl(dev, MUnit.IDR);
  1212. break;
  1213. case AAC_ENABLE_INTX:
  1214. /* set bit 7 */
  1215. val = src_readl(dev, MUnit.IDR);
  1216. val |= 0x80;
  1217. src_writel(dev, MUnit.IDR, val);
  1218. src_readl(dev, MUnit.IDR);
  1219. /* unmask int. */
  1220. val = PMC_ALL_INTERRUPT_BITS;
  1221. src_writel(dev, MUnit.IOAR, val);
  1222. src_readl(dev, MUnit.IOAR);
  1223. val = src_readl(dev, MUnit.OIMR);
  1224. src_writel(dev, MUnit.OIMR,
  1225. val & (~(PMC_GLOBAL_INT_BIT2)));
  1226. break;
  1227. default:
  1228. break;
  1229. }
  1230. }
  1231. static int aac_src_get_sync_status(struct aac_dev *dev)
  1232. {
  1233. int msix_val = 0;
  1234. int legacy_val = 0;
  1235. msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;
  1236. if (!dev->msi_enabled) {
  1237. /*
  1238. * if Legacy int status indicates cmd is not complete
  1239. * sample MSIx register to see if it indiactes cmd complete,
  1240. * if yes set the controller in MSIx mode and consider cmd
  1241. * completed
  1242. */
  1243. legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
  1244. if (!(legacy_val & 1) && msix_val)
  1245. dev->msi_enabled = 1;
  1246. return legacy_val;
  1247. }
  1248. return msix_val;
  1249. }