
/drivers/infiniband/hw/ipath/ipath_sd7220.c

https://bitbucket.org/abioy/linux
  1. /*
  2. * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  3. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. /*
  34. * This file contains all of the code that is specific to the SerDes
  35. * on the InfiniPath 7220 chip.
  36. */
  37. #include <linux/pci.h>
  38. #include <linux/delay.h>
  39. #include "ipath_kernel.h"
  40. #include "ipath_registers.h"
  41. #include "ipath_7220.h"
  42. /*
  43. * The IBSerDesMappTable is a memory that holds values to be stored in
  44. * various SerDes registers by IBC. It is not part of the normal kregs
  45. * map and is used in exactly one place, hence the #define below.
  46. */
  47. #define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t)))
  48. /*
  49. * The values below are used for the sdnum parameter, selecting one of the
  50. * two sections used for PCIe, or the single SerDes used for IB.
  51. */
  52. #define PCIE_SERDES0 0
  53. #define PCIE_SERDES1 1
  54. /*
  55. * The EPB requires addressing in a particular form. EPB_LOC() is intended
  56. * to make #definitions a little more readable.
  57. */
  58. #define EPB_ADDR_SHF 8
  59. #define EPB_LOC(chn, elt, reg) \
  60. (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
  61. EPB_ADDR_SHF)
  62. #define EPB_IB_QUAD0_CS_SHF (25)
  63. #define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
  64. #define EPB_IB_UC_CS_SHF (26)
  65. #define EPB_PCIE_UC_CS_SHF (27)
  66. #define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
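/*
 * Illustrative sketch (not part of the original driver): a worked example of
 * the packing above, plus a hypothetical helper that inverts EPB_LOC().
 * With EPB_ADDR_SHF = 8, EPB_LOC(0, 7, 3) is (0x7 | (0 << 4) | (3 << 9)) << 8
 * = 0x60700, so IB_CTRL2(0) below becomes 0x60700 | EPB_IB_QUAD0_CS.
 */
static inline void epb_loc_decode(u32 loc, u32 *chn, u32 *elt, u32 *reg)
{
	u32 fields = loc >> EPB_ADDR_SHF;	/* undo the final shift */

	*elt = fields & 0xf;		/* element: low 4 bits */
	*chn = (fields >> 4) & 7;	/* channel: next 3 bits */
	*reg = (fields >> 9) & 0x3f;	/* register: next 6 bits */
}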
  67. /* Forward declarations. */
  68. static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
  69. u32 data, u32 mask);
  70. static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
  71. int mask);
  72. static int ipath_sd_trimdone_poll(struct ipath_devdata *dd);
  73. static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
  74. const char *where);
  75. static int ipath_sd_setvals(struct ipath_devdata *dd);
  76. static int ipath_sd_early(struct ipath_devdata *dd);
  77. static int ipath_sd_dactrim(struct ipath_devdata *dd);
  78. /* Set the registers that IBC may muck with to their default "preset" values */
  79. int ipath_sd7220_presets(struct ipath_devdata *dd);
  80. static int ipath_internal_presets(struct ipath_devdata *dd);
  81. /* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
  82. static int ipath_sd_trimself(struct ipath_devdata *dd, int val);
  83. static int epb_access(struct ipath_devdata *dd, int sdnum, int claim);
  84. void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup);
  85. /*
  86. * Below keeps track of whether the "once per power-on" initialization has
  87. * been done, because uC code Version 1.32.17 or higher allows the uC to
  88. * be reset at will, and Automatic Equalization may require it. So the
  89. * state of the reset "pin", as reflected in the was_reset parameter to
  90. * ipath_sd7220_init(), is no longer valid. Instead, we check for the
  91. * actual uC code having been loaded.
  92. */
  93. static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
  94. {
  95. if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0))
  96. dd->serdes_first_init_done = 1;
  97. return dd->serdes_first_init_done;
  98. }
  99. /* repeat #define for local use. "Real" #define is in ipath_iba7220.c */
  100. #define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
  101. #define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
  102. #define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
  103. #define UC_PAR_CLR_D 8
  104. #define UC_PAR_CLR_M 0xC
  105. #define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
  106. #define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
  107. void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd)
  108. {
  109. int ret;
  110. /* clear, then re-enable parity errs */
  111. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
  112. UC_PAR_CLR_D, UC_PAR_CLR_M);
  113. if (ret < 0) {
  114. ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
  115. goto bail;
  116. }
  117. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
  118. UC_PAR_CLR_M);
  119. ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
  120. udelay(4);
  121. ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
  122. INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
  123. ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
  124. bail:
  125. return;
  126. }
  127. /*
  128. * After a reset or other unusual event, the epb interface may need
  129. * to be re-synchronized between the host and the uC.
  130. * Returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected).
  131. */
  132. #define IBSD_RESYNC_TRIES 3
  133. #define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
  134. #define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
  135. static int ipath_resync_ibepb(struct ipath_devdata *dd)
  136. {
  137. int ret, pat, tries, chn;
  138. u32 loc;
  139. ret = -1;
  140. chn = 0;
  141. for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
  142. loc = IB_PGUDP(chn);
  143. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
  144. if (ret < 0) {
  145. ipath_dev_err(dd, "Failed read in resync\n");
  146. continue;
  147. }
  148. if (ret != 0xF0 && ret != 0x55 && tries == 0)
  149. ipath_dev_err(dd, "unexpected pattern in resync\n");
  150. pat = ret ^ 0xA5; /* alternate F0 and 55 */
  151. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
  152. if (ret < 0) {
  153. ipath_dev_err(dd, "Failed write in resync\n");
  154. continue;
  155. }
  156. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
  157. if (ret < 0) {
  158. ipath_dev_err(dd, "Failed re-read in resync\n");
  159. continue;
  160. }
  161. if (ret != pat) {
  162. ipath_dev_err(dd, "Failed compare1 in resync\n");
  163. continue;
  164. }
  165. loc = IB_CMUDONE(chn);
  166. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
  167. if (ret < 0) {
  168. ipath_dev_err(dd, "Failed CMUDONE rd in resync\n");
  169. continue;
  170. }
  171. if ((ret & 0x70) != ((chn << 4) | 0x40)) {
  172. ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
  173. ret, chn);
  174. continue;
  175. }
  176. if (++chn == 4)
  177. break; /* Success */
  178. }
  179. ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries);
  180. return (ret > 0) ? 0 : ret;
  181. }
  182. /*
  183. * Localize the stuff that should be done to change the IB uC reset.
  184. * Returns <0 for errors.
  185. */
  186. static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
  187. {
  188. u64 rst_val;
  189. int ret = 0;
  190. unsigned long flags;
  191. rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
  192. if (assert_rst) {
  193. /*
  194. * Vendor recommends "interrupting" uC before reset, to
  195. * minimize possible glitches.
  196. */
  197. spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
  198. epb_access(dd, IB_7220_SERDES, 1);
  199. rst_val |= 1ULL;
  200. /* Squelch possible parity error from _asserting_ reset */
  201. ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
  202. dd->ipath_hwerrmask &
  203. ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
  204. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
  205. /* flush write, delay to ensure it took effect */
  206. ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
  207. udelay(2);
  208. /* once it's reset, can remove interrupt */
  209. epb_access(dd, IB_7220_SERDES, -1);
  210. spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
  211. } else {
  212. /*
  213. * Before we de-assert reset, we need to deal with
  214. * possible glitch on the Parity-error line.
  215. * Suppress it around the reset, both in chip-level
  216. * hwerrmask and in IB uC control reg. uC will allow
  217. * it again during startup.
  218. */
  219. u64 val;
  220. rst_val &= ~(1ULL);
  221. ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
  222. dd->ipath_hwerrmask &
  223. ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
  224. ret = ipath_resync_ibepb(dd);
  225. if (ret < 0)
  226. ipath_dev_err(dd, "unable to re-sync IB EPB\n");
  227. /* set uC control regs to suppress parity errs */
  228. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
  229. if (ret < 0)
  230. goto bail;
  231. /* IB uC code past Version 1.32.17 allows suppression of wdog */
  232. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
  233. 0x80);
  234. if (ret < 0) {
  235. ipath_dev_err(dd, "Failed to set WDOG disable\n");
  236. goto bail;
  237. }
  238. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
  239. /* flush write, delay for startup */
  240. ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
  241. udelay(1);
  242. /* clear, then re-enable parity errs */
  243. ipath_sd7220_clr_ibpar(dd);
  244. val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
  245. if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) {
  246. ipath_dev_err(dd, "IBUC Parity still set after RST\n");
  247. dd->ipath_hwerrmask &=
  248. ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
  249. }
  250. ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
  251. dd->ipath_hwerrmask);
  252. }
  253. bail:
  254. return ret;
  255. }
  256. static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
  257. const char *where)
  258. {
  259. int ret, chn, baduns;
  260. u64 val;
  261. if (!where)
  262. where = "?";
  263. /* give time for reset to settle out in EPB */
  264. udelay(2);
  265. ret = ipath_resync_ibepb(dd);
  266. if (ret < 0)
  267. ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
  268. /* Do "sacrificial read" to get EPB in sane state after reset */
  269. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
  270. if (ret < 0)
  271. ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
  272. /* Check/show "summary" Trim-done bit in IBCStatus */
  273. val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
  274. if (val & (1ULL << 11))
  275. ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where);
  276. else
  277. ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
  278. udelay(2);
  279. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
  280. if (ret < 0)
  281. ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
  282. udelay(10);
  283. baduns = 0;
  284. for (chn = 3; chn >= 0; --chn) {
  285. /* Read CTRL reg for each channel to check TRIMDONE */
  286. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
  287. IB_CTRL2(chn), 0, 0);
  288. if (ret < 0)
  289. ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d"
  290. " (%s)\n", chn, where);
  291. if (!(ret & 0x10)) {
  292. int probe;
  293. baduns |= (1 << chn);
  294. ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
  295. " (%s)\n", chn, ret, where);
  296. probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
  297. IB_PGUDP(0), 0, 0);
  298. ipath_dev_err(dd, "probe is %d (%02X)\n",
  299. probe, probe);
  300. probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
  301. IB_CTRL2(chn), 0, 0);
  302. ipath_dev_err(dd, "re-read: %d (%02X)\n",
  303. probe, probe);
  304. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
  305. IB_CTRL2(chn), 0x10, 0x10);
  306. if (ret < 0)
  307. ipath_dev_err(dd,
  308. "Err on TRIMDONE rewrite1\n");
  309. }
  310. }
  311. for (chn = 3; chn >= 0; --chn) {
  312. /* Read CTRL reg for each channel to check TRIMDONE */
  313. if (baduns & (1 << chn)) {
  314. ipath_dev_err(dd,
  315. "Reseting TRIMDONE on chn %d (%s)\n",
  316. chn, where);
  317. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
  318. IB_CTRL2(chn), 0x10, 0x10);
  319. if (ret < 0)
  320. ipath_dev_err(dd, "Failed re-setting "
  321. "TRIMDONE, chn %d (%s)\n",
  322. chn, where);
  323. }
  324. }
  325. }
  326. /*
  327. * Below is the portion of the IBA7220-specific bringup_serdes() that actually
  328. * deals with registers and memory within the SerDes itself.
  329. * Post IB uC code version 1.32.17, was_reset being 1 is not really
  330. * informative, so we double-check.
  331. */
  332. int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
  333. {
  334. int ret = 1; /* default to failure */
  335. int first_reset;
  336. int val_stat;
  337. if (!was_reset) {
  338. /* entered with reset not asserted, we need to do it */
  339. ipath_ibsd_reset(dd, 1);
  340. ipath_sd_trimdone_monitor(dd, "Driver-reload");
  341. }
  342. /* Substitute our deduced value for was_reset */
  343. ret = ipath_ibsd_ucode_loaded(dd);
  344. if (ret < 0) {
  345. ret = 1;
  346. goto done;
  347. }
  348. first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
  349. /*
  350. * Alter some regs per vendor latest doc, reset-defaults
  351. * are not right for IB.
  352. */
  353. ret = ipath_sd_early(dd);
  354. if (ret < 0) {
  355. ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n");
  356. ret = 1;
  357. goto done;
  358. }
  359. /*
  360. * Set DAC manual trim IB.
  361. * We only do this once after chip has been reset (usually
  362. * same as once per system boot).
  363. */
  364. if (first_reset) {
  365. ret = ipath_sd_dactrim(dd);
  366. if (ret < 0) {
  367. ipath_dev_err(dd, "Failed IB SERDES DAC trim\n");
  368. ret = 1;
  369. goto done;
  370. }
  371. }
  372. /*
  373. * Set various registers (DDS and RXEQ) that will be
  374. * controlled by IBC (in 1.2 mode) to reasonable preset values
  375. * Calling the "internal" version avoids the "check for needed"
  376. * and "trimdone monitor" that might be counter-productive.
  377. */
  378. ret = ipath_internal_presets(dd);
  379. if (ret < 0) {
  380. ipath_dev_err(dd, "Failed to set IB SERDES presets\n");
  381. ret = 1;
  382. goto done;
  383. }
  384. ret = ipath_sd_trimself(dd, 0x80);
  385. if (ret < 0) {
  386. ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
  387. ret = 1;
  388. goto done;
  389. }
  390. /* Load image, then try to verify */
  391. ret = 0; /* Assume success */
  392. if (first_reset) {
  393. int vfy;
  394. int trim_done;
  395. ipath_dbg("SerDes uC was reset, reloading PRAM\n");
  396. ret = ipath_sd7220_ib_load(dd);
  397. if (ret < 0) {
  398. ipath_dev_err(dd, "Failed to load IB SERDES image\n");
  399. ret = 1;
  400. goto done;
  401. }
  402. /* Loaded image, try to verify */
  403. vfy = ipath_sd7220_ib_vfy(dd);
  404. if (vfy != ret) {
  405. ipath_dev_err(dd, "SERDES PRAM VFY failed\n");
  406. ret = 1;
  407. goto done;
  408. }
  409. /*
  410. * Loaded and verified. Almost good...
  411. * hold "success" in ret
  412. */
  413. ret = 0;
  414. /*
  415. * Prev steps all worked, continue bringup
  416. * De-assert RESET to uC, only in first reset, to allow
  417. * trimming.
  418. *
  419. * Since our default setup sets START_EQ1 to
  420. * PRESET, we need to clear that for this very first run.
  421. */
  422. ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
  423. if (ret < 0) {
  424. ipath_dev_err(dd, "Failed clearing START_EQ1\n");
  425. ret = 1;
  426. goto done;
  427. }
  428. ipath_ibsd_reset(dd, 0);
  429. /*
  430. * If this is not the first reset, trimdone should be set
  431. * already.
  432. */
  433. trim_done = ipath_sd_trimdone_poll(dd);
  434. /*
  435. * Whether or not trimdone succeeded, we need to put the
  436. * uC back into reset to avoid a possible fight with the
  437. * IBC state-machine.
  438. */
  439. ipath_ibsd_reset(dd, 1);
  440. if (!trim_done) {
  441. ipath_dev_err(dd, "No TRIMDONE seen\n");
  442. ret = 1;
  443. goto done;
  444. }
  445. ipath_sd_trimdone_monitor(dd, "First-reset");
  446. /* Remember so we do not re-do the load, dactrim, etc. */
  447. dd->serdes_first_init_done = 1;
  448. }
  449. /*
  450. * Setup for channel training and load values for
  451. * RxEq and DDS in tables used by IBC in IB1.2 mode
  452. */
  453. val_stat = ipath_sd_setvals(dd);
  454. if (val_stat < 0)
  455. ret = 1;
  456. done:
  457. /* start relock timer regardless, but start at 1 second */
  458. ipath_set_relock_poll(dd, -1);
  459. return ret;
  460. }
  461. #define EPB_ACC_REQ 1
  462. #define EPB_ACC_GNT 0x100
  463. #define EPB_DATA_MASK 0xFF
  464. #define EPB_RD (1ULL << 24)
  465. #define EPB_TRANS_RDY (1ULL << 31)
  466. #define EPB_TRANS_ERR (1ULL << 30)
  467. #define EPB_TRANS_TRIES 5
  468. /*
  469. * Query, claim, or release ownership of the EPB (External Parallel Bus)
  470. * for a specified SERDES.
  471. * The "claim" parameter is >0 to claim, <0 to release, 0 to query.
  472. * Returns <0 for errors, >0 if we had ownership, else 0.
  473. */
  474. static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
  475. {
  476. u16 acc;
  477. u64 accval;
  478. int owned = 0;
  479. u64 oct_sel = 0;
  480. switch (sdnum) {
  481. case IB_7220_SERDES :
  482. /*
  483. * The IB SERDES "ownership" is fairly simple: a single
  484. * request/grant pair.
  485. */
  486. acc = dd->ipath_kregs->kr_ib_epbacc;
  487. break;
  488. case PCIE_SERDES0 :
  489. case PCIE_SERDES1 :
  490. /* PCIe SERDES has two "octants", need to select which */
  491. acc = dd->ipath_kregs->kr_pcie_epbacc;
  492. oct_sel = (2 << (sdnum - PCIE_SERDES0));
  493. break;
  494. default :
  495. return 0;
  496. }
  497. /* Make sure any outstanding transaction was seen */
  498. ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
  499. udelay(15);
  500. accval = ipath_read_kreg32(dd, acc);
  501. owned = !!(accval & EPB_ACC_GNT);
  502. if (claim < 0) {
  503. /* Need to release */
  504. u64 pollval;
  505. /*
  506. * The only writeable bits are the request and CS.
  507. * Both should be clear
  508. */
  509. u64 newval = 0;
  510. ipath_write_kreg(dd, acc, newval);
  511. /* First read after write is not trustworthy */
  512. pollval = ipath_read_kreg32(dd, acc);
  513. udelay(5);
  514. pollval = ipath_read_kreg32(dd, acc);
  515. if (pollval & EPB_ACC_GNT)
  516. owned = -1;
  517. } else if (claim > 0) {
  518. /* Need to claim */
  519. u64 pollval;
  520. u64 newval = EPB_ACC_REQ | oct_sel;
  521. ipath_write_kreg(dd, acc, newval);
  522. /* First read after write is not trustworthy */
  523. pollval = ipath_read_kreg32(dd, acc);
  524. udelay(5);
  525. pollval = ipath_read_kreg32(dd, acc);
  526. if (!(pollval & EPB_ACC_GNT))
  527. owned = -1;
  528. }
  529. return owned;
  530. }
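/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * driver): claim the IB SerDes EPB under the software lock, do work, then
 * release it, mirroring the pattern used by ipath_sd7220_reg_mod() below.
 */
static int __maybe_unused epb_access_example(struct ipath_devdata *dd)
{
	unsigned long flags;
	int owned, ret = 0;

	spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
	owned = epb_access(dd, IB_7220_SERDES, 1);	/* claim */
	if (owned < 0) {
		spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
		return -1;
	}
	/* ... EPB transactions would go here ... */
	if (epb_access(dd, IB_7220_SERDES, -1) < 0)	/* release */
		ret = -1;
	spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
	return ret;
}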
  531. /*
  532. * Lemma to deal with race condition of write..read to epb regs
  533. */
  534. static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
  535. {
  536. int tries;
  537. u64 transval;
  538. ipath_write_kreg(dd, reg, i_val);
  539. /* Throw away first read, as RDY bit may be stale */
  540. transval = ipath_read_kreg64(dd, reg);
  541. for (tries = EPB_TRANS_TRIES; tries; --tries) {
  542. transval = ipath_read_kreg32(dd, reg);
  543. if (transval & EPB_TRANS_RDY)
  544. break;
  545. udelay(5);
  546. }
  547. if (transval & EPB_TRANS_ERR)
  548. return -1;
  549. if (tries > 0 && o_vp)
  550. *o_vp = transval;
  551. return tries;
  552. }
  553. /**
  554. *
  555. * ipath_sd7220_reg_mod - modify SERDES register
  556. * @dd: the infinipath device
  557. * @sdnum: which SERDES to access
  558. * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
  559. * @wd: Write Data - value to set in register
  560. * @mask: ones where data should be spliced into reg.
  561. *
  562. * Basic register read/modify/write, with un-needed accesses elided. That is,
  563. * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
  564. * returns current (presumed, if a write was done) contents of selected
  565. * register, or <0 if errors.
  566. */
  567. static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
  568. u32 wd, u32 mask)
  569. {
  570. u16 trans;
  571. u64 transval;
  572. int owned;
  573. int tries, ret;
  574. unsigned long flags;
  575. switch (sdnum) {
  576. case IB_7220_SERDES :
  577. trans = dd->ipath_kregs->kr_ib_epbtrans;
  578. break;
  579. case PCIE_SERDES0 :
  580. case PCIE_SERDES1 :
  581. trans = dd->ipath_kregs->kr_pcie_epbtrans;
  582. break;
  583. default :
  584. return -1;
  585. }
  586. /*
  587. * All access is locked in software (vs other host threads) and
  588. * hardware (vs uC access).
  589. */
  590. spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
  591. owned = epb_access(dd, sdnum, 1);
  592. if (owned < 0) {
  593. spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
  594. return -1;
  595. }
  596. ret = 0;
  597. for (tries = EPB_TRANS_TRIES; tries; --tries) {
  598. transval = ipath_read_kreg32(dd, trans);
  599. if (transval & EPB_TRANS_RDY)
  600. break;
  601. udelay(5);
  602. }
  603. if (tries > 0) {
  604. tries = 1; /* to make read-skip work */
  605. if (mask != 0xFF) {
  606. /*
  607. * Not a pure write, so need to read.
  608. * loc encodes chip-select as well as address
  609. */
  610. transval = loc | EPB_RD;
  611. tries = epb_trans(dd, trans, transval, &transval);
  612. }
  613. if (tries > 0 && mask != 0) {
  614. /*
  615. * Not a pure read, so need to write.
  616. */
  617. wd = (wd & mask) | (transval & ~mask);
  618. transval = loc | (wd & EPB_DATA_MASK);
  619. tries = epb_trans(dd, trans, transval, &transval);
  620. }
  621. }
  622. /* else, failed to see ready, what error-handling? */
  623. /*
  624. * Release bus. Failure is an error.
  625. */
  626. if (epb_access(dd, sdnum, -1) < 0)
  627. ret = -1;
  628. else
  629. ret = transval & EPB_DATA_MASK;
  630. spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
  631. if (tries <= 0)
  632. ret = -1;
  633. return ret;
  634. }
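/*
 * Illustrative usage (values are examples only, mirroring calls made
 * elsewhere in this file): with mask == 0 the call is a pure read, with
 * mask == 0xFF it is a pure write, and anything else is a read-modify-write.
 *
 *	rd  = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
 *	wr  = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0x10, 0xFF);
 *	rmw = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
 *				   UC_PAR_CLR_D, UC_PAR_CLR_M);
 */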
  635. #define EPB_ROM_R (2)
  636. #define EPB_ROM_W (1)
  637. /*
  638. * The locations below are all uC-related; use the appropriate UC_CS,
  639. * depending on which SerDes is used.
  640. */
  641. #define EPB_UC_CTL EPB_LOC(6, 0, 0)
  642. #define EPB_MADDRL EPB_LOC(6, 0, 2)
  643. #define EPB_MADDRH EPB_LOC(6, 0, 3)
  644. #define EPB_ROMDATA EPB_LOC(6, 0, 4)
  645. #define EPB_RAMDATA EPB_LOC(6, 0, 5)
  646. /* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
  647. static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
  648. u8 *buf, int cnt, int rd_notwr)
  649. {
  650. u16 trans;
  651. u64 transval;
  652. u64 csbit;
  653. int owned;
  654. int tries;
  655. int sofar;
  656. int addr;
  657. int ret;
  658. unsigned long flags;
  659. const char *op;
  660. /* Pick appropriate transaction reg and "Chip select" for this serdes */
  661. switch (sdnum) {
  662. case IB_7220_SERDES :
  663. csbit = 1ULL << EPB_IB_UC_CS_SHF;
  664. trans = dd->ipath_kregs->kr_ib_epbtrans;
  665. break;
  666. case PCIE_SERDES0 :
  667. case PCIE_SERDES1 :
  668. /* PCIe SERDES has uC "chip select" in different bit, too */
  669. csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
  670. trans = dd->ipath_kregs->kr_pcie_epbtrans;
  671. break;
  672. default :
  673. return -1;
  674. }
  675. op = rd_notwr ? "Rd" : "Wr";
  676. spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
  677. owned = epb_access(dd, sdnum, 1);
  678. if (owned < 0) {
  679. spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
  680. ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n",
  681. op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe",
  682. owned, loc);
  683. return -1;
  684. }
  685. /*
  686. * In future code, we may need to distinguish several address ranges,
  687. * and select various memories based on this. For now, just trim
  688. * "loc" (location including address and memory select) to
  689. * "addr" (address within memory). we will only support PRAM
  690. * The memory is 8KB.
  691. */
  692. addr = loc & 0x1FFF;
  693. for (tries = EPB_TRANS_TRIES; tries; --tries) {
  694. transval = ipath_read_kreg32(dd, trans);
  695. if (transval & EPB_TRANS_RDY)
  696. break;
  697. udelay(5);
  698. }
  699. sofar = 0;
  700. if (tries <= 0)
  701. ipath_dbg("No initial RDY on EPB access request\n");
  702. else {
  703. /*
  704. * Every "memory" access is doubly-indirect.
  705. * We set two bytes of address, then read/write
  706. * one or more bytes of data.
  707. */
  708. /* First, we set control to "Read" or "Write" */
  709. transval = csbit | EPB_UC_CTL |
  710. (rd_notwr ? EPB_ROM_R : EPB_ROM_W);
  711. tries = epb_trans(dd, trans, transval, &transval);
  712. if (tries <= 0)
  713. ipath_dbg("No EPB response to uC %s cmd\n", op);
  714. while (tries > 0 && sofar < cnt) {
  715. if (!sofar) {
  716. /* Only set address at start of chunk */
  717. int addrbyte = (addr + sofar) >> 8;
  718. transval = csbit | EPB_MADDRH | addrbyte;
  719. tries = epb_trans(dd, trans, transval,
  720. &transval);
  721. if (tries <= 0) {
  722. ipath_dbg("No EPB response ADDRH\n");
  723. break;
  724. }
  725. addrbyte = (addr + sofar) & 0xFF;
  726. transval = csbit | EPB_MADDRL | addrbyte;
  727. tries = epb_trans(dd, trans, transval,
  728. &transval);
  729. if (tries <= 0) {
  730. ipath_dbg("No EPB response ADDRL\n");
  731. break;
  732. }
  733. }
  734. if (rd_notwr)
  735. transval = csbit | EPB_ROMDATA | EPB_RD;
  736. else
  737. transval = csbit | EPB_ROMDATA | buf[sofar];
  738. tries = epb_trans(dd, trans, transval, &transval);
  739. if (tries <= 0) {
  740. ipath_dbg("No EPB response DATA\n");
  741. break;
  742. }
  743. if (rd_notwr)
  744. buf[sofar] = transval & EPB_DATA_MASK;
  745. ++sofar;
  746. }
  747. /* Finally, clear control-bit for Read or Write */
  748. transval = csbit | EPB_UC_CTL;
  749. tries = epb_trans(dd, trans, transval, &transval);
  750. if (tries <= 0)
  751. ipath_dbg("No EPB response to drop of uC %s cmd\n", op);
  752. }
  753. ret = sofar;
  754. /* Release bus. Failure is an error */
  755. if (epb_access(dd, sdnum, -1) < 0)
  756. ret = -1;
  757. spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
  758. if (tries <= 0) {
  759. ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar);
  760. ret = -1;
  761. }
  762. return ret;
  763. }
  764. #define PROG_CHUNK 64
  765. int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
  766. u8 *img, int len, int offset)
  767. {
  768. int cnt, sofar, req;
  769. sofar = 0;
  770. while (sofar < len) {
  771. req = len - sofar;
  772. if (req > PROG_CHUNK)
  773. req = PROG_CHUNK;
  774. cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar,
  775. img + sofar, req, 0);
  776. if (cnt < req) {
  777. sofar = -1;
  778. break;
  779. }
  780. sofar += req;
  781. }
  782. return sofar;
  783. }
  784. #define VFY_CHUNK 64
  785. #define SD_PRAM_ERROR_LIMIT 42
  786. int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
  787. const u8 *img, int len, int offset)
  788. {
  789. int cnt, sofar, req, idx, errors;
  790. unsigned char readback[VFY_CHUNK];
  791. errors = 0;
  792. sofar = 0;
  793. while (sofar < len) {
  794. req = len - sofar;
  795. if (req > VFY_CHUNK)
  796. req = VFY_CHUNK;
  797. cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset,
  798. readback, req, 1);
  799. if (cnt < req) {
  800. /* failed in read itself */
  801. sofar = -1;
  802. break;
  803. }
  804. for (idx = 0; idx < cnt; ++idx) {
  805. if (readback[idx] != img[idx+sofar])
  806. ++errors;
  807. }
  808. sofar += cnt;
  809. }
  810. return errors ? -errors : sofar;
  811. }
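/*
 * Illustrative sketch (hypothetical helper and image buffer, not part of the
 * original driver): load a PRAM image and then verify it, the same
 * load-then-compare pattern ipath_sd7220_init() uses via the
 * ipath_sd7220_ib_load()/ipath_sd7220_ib_vfy() wrappers.
 */
static int __maybe_unused example_pram_load_and_verify(struct ipath_devdata *dd,
							u8 *img, int len)
{
	int loaded, verified;

	loaded = ipath_sd7220_prog_ld(dd, IB_7220_SERDES, img, len, 0);
	if (loaded < 0)
		return -1;		/* transfer failed partway through */
	verified = ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, img, len, 0);
	return (verified == loaded) ? 0 : -1;	/* miscompare or read error */
}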
  812. /* IRQ not set up at this point in init, so we poll. */
  813. #define IB_SERDES_TRIM_DONE (1ULL << 11)
  814. #define TRIM_TMO (30)
  815. static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
  816. {
  817. int trim_tmo, ret;
  818. uint64_t val;
  819. /*
  820. * Default to failure, so IBC will not start
  821. * without IB_SERDES_TRIM_DONE.
  822. */
  823. ret = 0;
  824. for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
  825. val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
  826. if (val & IB_SERDES_TRIM_DONE) {
  827. ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo);
  828. ret = 1;
  829. break;
  830. }
  831. msleep(10);
  832. }
  833. if (trim_tmo >= TRIM_TMO) {
  834. ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
  835. ret = 0;
  836. }
  837. return ret;
  838. }
  839. #define TX_FAST_ELT (9)
  840. /*
  841. * Set the "negotiation" values for SERDES. These are used by the IB1.2
  842. * link negotiation. Macros below are an attempt to keep the values a
  843. * little more human-editable.
  844. * First, values related to Drive De-emphasis Settings.
  845. */
  846. #define NUM_DDS_REGS 6
  847. #define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */
  848. #define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
  849. { { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
  850. (main_d << 3) | 4 | (ipre_d >> 2), \
  851. (main_s << 3) | 4 | (ipre_s >> 2), \
  852. ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
  853. ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
  854. static struct dds_init {
  855. uint8_t reg_vals[NUM_DDS_REGS];
  856. } dds_init_vals[] = {
  857. /* DDR(FDR) SDR(HDR) */
  858. /* Vendor recommends below for 3m cable */
  859. #define DDS_3M 0
  860. DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
  861. DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
  862. DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
  863. DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
  864. DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
  865. DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
  866. DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
  867. DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
  868. DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
  869. DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
  870. DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
  871. DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
  872. DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
  873. /* Vendor recommends below for 1m cable */
  874. #define DDS_1M 13
  875. DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
  876. DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
  877. DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
  878. };
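/*
 * Illustrative worked example (not part of the original driver): the first
 * row above, DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0), expands per the macro to
 * reg_vals[] = { 0x3F, 0x3B, 0x9C, 0xB4, 0x39, 0x33 }, i.e. the amplitude
 * bytes first, then the main-tap bytes, then the packed post/pre-cursor bytes.
 */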
  879. /*
  880. * Next, values related to Receive Equalization.
  881. * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR
  882. */
  883. /* Hardware packs an element number and register address thus: */
  884. #define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
  885. #define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
  886. {RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
  887. #define RXEQ_VAL_ALL(elt, adr, val) \
  888. {RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
  889. #define RXEQ_SDR_DFELTH 0
  890. #define RXEQ_SDR_TLTH 0
  891. #define RXEQ_SDR_G1CNT_Z1CNT 0x11
  892. #define RXEQ_SDR_ZCNT 23
  893. static struct rxeq_init {
  894. u16 rdesc; /* in form used in SerDesDDSRXEQ */
  895. u8 rdata[4];
  896. } rxeq_init_vals[] = {
  897. /* Set Rcv Eq. to Preset mode */
  898. RXEQ_VAL_ALL(7, 0x27, 0x10),
  899. /* Set DFELTHFDR/HDR thresholds */
  900. RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */
  901. RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
  902. /* Set TLTHFDR/HDR threshold */
  903. RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */
  904. RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */
  905. /* Set Preamp setting 2 (ZFR/ZCNT) */
  906. RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */
  907. RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */
  908. /* Set Preamp DC gain and Setting 1 (GFR/GHR) */
  909. RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */
  910. RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */
  911. /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
  912. RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
  913. RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
  914. };
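/*
 * Illustrative worked example (not part of the original driver): the first
 * entry above, RXEQ_VAL_ALL(7, 0x27, 0x10), packs its rdesc as
 * (7 & 0xF) | (0x27 << 4) = 0x277, with rdata[] = { 0x10, 0x10, 0x10, 0x10 }.
 */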
  915. /* There are 17 values from vendor, but IBC only accesses the first 16 */
  916. #define DDS_ROWS (16)
  917. #define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
  918. static int ipath_sd_setvals(struct ipath_devdata *dd)
  919. {
  920. int idx, midx;
  921. int min_idx; /* Minimum index for this portion of table */
  922. uint32_t dds_reg_map;
  923. u64 __iomem *taddr, *iaddr;
  924. uint64_t data;
  925. uint64_t sdctl;
  926. taddr = dd->ipath_kregbase + KR_IBSerDesMappTable;
  927. iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq;
  928. /*
  929. * Init the DDS section of the table.
  930. * Each "row" of the table provokes NUM_DDS_REG writes, to the
  931. * registers indicated in DDS_REG_MAP.
  932. */
  933. sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
  934. sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
  935. sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
  936. ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl);
  937. /*
  938. * Iterate down table within loop for each register to store.
  939. */
  940. dds_reg_map = DDS_REG_MAP;
  941. for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
  942. data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
  943. writeq(data, iaddr + idx);
  944. mmiowb();
  945. ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
  946. dds_reg_map >>= 4;
  947. for (midx = 0; midx < DDS_ROWS; ++midx) {
  948. u64 __iomem *daddr = taddr + ((midx << 4) + idx);
  949. data = dds_init_vals[midx].reg_vals[idx];
  950. writeq(data, daddr);
  951. mmiowb();
  952. ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
  953. } /* End inner for (vals for this reg, each row) */
  954. } /* end outer for (regs to be stored) */
  955. /*
  956. * Init the RXEQ section of the table. As explained above the table
  957. * rxeq_init_vals[], this runs in a different order, as the pattern
  958. * of register references is more complex, but there are only
  959. * four "data" values per register.
  960. */
  961. min_idx = idx; /* RXEQ indices pick up where DDS left off */
  962. taddr += 0x100; /* RXEQ data is in second half of table */
  963. /* Iterate through RXEQ register addresses */
  964. for (idx = 0; idx < RXEQ_ROWS; ++idx) {
  965. int didx; /* "destination" */
  966. int vidx;
  967. /* didx is offset by min_idx to address RXEQ range of regs */
  968. didx = idx + min_idx;
  969. /* Store the next RXEQ register address */
  970. writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
  971. mmiowb();
  972. ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
  973. /* Iterate through RXEQ values */
  974. for (vidx = 0; vidx < 4; vidx++) {
  975. data = rxeq_init_vals[idx].rdata[vidx];
  976. writeq(data, taddr + (vidx << 6) + idx);
  977. mmiowb();
  978. ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
  979. }
  980. } /* end outer for (Reg-writes for RXEQ) */
  981. return 0;
  982. }
  983. #define CMUCTRL5 EPB_LOC(7, 0, 0x15)
  984. #define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
  985. #define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
  986. #define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
  987. #define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
  988. #define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
  989. static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask)
  990. {
  991. int ret = -1;
  992. int sloc; /* shifted loc, for messages */
  993. loc |= (1U << EPB_IB_QUAD0_CS_SHF);
  994. sloc = loc >> EPB_ADDR_SHF;
  995. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask);
  996. if (ret < 0)
  997. ipath_dev_err(dd, "Write failed: elt %d,"
  998. " addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
  999. (sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7,
  1000. val & 0xFF, mask & 0xFF);
  1001. return ret;
  1002. }
  1003. /*
  1004. * Repeat a "store" across all channels of the IB SerDes.
  1005. * Although nominally it inherits the "read value" of the last
  1006. * channel it modified, the only really useful return is <0 for
  1007. * failure, >= 0 for success. The parameter 'loc' is assumed to
  1008. * be the location for the channel-0 copy of the register to
  1009. * be modified.
  1010. */
  1011. static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
  1012. int mask)
  1013. {
  1014. int ret = -1;
  1015. int chnl;
  1016. if (loc & EPB_GLOBAL_WR) {
  1017. /*
  1018. * Our caller has assured us that we can set all four
  1019. * channels at once. Trust that. If mask is not 0xFF,
  1020. * we will read the _specified_ channel for our starting
  1021. * value.
  1022. */
  1023. loc |= (1U << EPB_IB_QUAD0_CS_SHF);
  1024. chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
  1025. if (mask != 0xFF) {
  1026. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
  1027. loc & ~EPB_GLOBAL_WR, 0, 0);
  1028. if (ret < 0) {
  1029. int sloc = loc >> EPB_ADDR_SHF;
  1030. ipath_dev_err(dd, "pre-read failed: elt %d,"
  1031. " addr 0x%X, chnl %d\n", (sloc & 0xF),
  1032. (sloc >> 9) & 0x3f, chnl);
  1033. return ret;
  1034. }
  1035. val = (ret & ~mask) | (val & mask);
  1036. }
  1037. loc &= ~(7 << (4+EPB_ADDR_SHF));
  1038. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
  1039. if (ret < 0) {
  1040. int sloc = loc >> EPB_ADDR_SHF;
  1041. ipath_dev_err(dd, "Global WR failed: elt %d,"
  1042. " addr 0x%X, val %02X\n",
  1043. (sloc & 0xF), (sloc >> 9) & 0x3f, val);
  1044. }
  1045. return ret;
  1046. }
  1047. /* Clear "channel" and set CS so we can simply iterate */
  1048. loc &= ~(7 << (4+EPB_ADDR_SHF));
  1049. loc |= (1U << EPB_IB_QUAD0_CS_SHF);
  1050. for (chnl = 0; chnl < 4; ++chnl) {
  1051. int cloc;
  1052. cloc = loc | (chnl << (4+EPB_ADDR_SHF));
  1053. ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
  1054. if (ret < 0) {
  1055. int sloc = loc >> EPB_ADDR_SHF;
  1056. ipath_dev_err(dd, "Write failed: elt %d,"
  1057. " addr 0x%X, chnl %d, val 0x%02X,"
  1058. " mask 0x%02X\n",
  1059. (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
  1060. val & 0xFF, mask & 0xFF);
  1061. break;
  1062. }
  1063. }
  1064. return ret;
  1065. }
  1066. /*
  1067. * Set the Tx values normally modified by IBC in IB1.2 mode to default
  1068. * values, as gotten from first row of init table.
  1069. */
  1070. static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
  1071. {
  1072. int ret;
  1073. int idx, reg, data;
  1074. uint32_t regmap;
  1075. regmap = DDS_REG_MAP;
  1076. for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
  1077. reg = (regmap & 0xF);
  1078. regmap >>= 4;
  1079. data = ddi->reg_vals[idx];
  1080. /* Vendor says RMW not needed for these regs, use 0xFF mask */
  1081. ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
  1082. if (ret < 0)
  1083. break;
  1084. }
  1085. return ret;
  1086. }
  1087. /*
  1088. * Set the Rx values normally modified by IBC in IB1.2 mode to default
  1089. * values, as gotten from selected column of init table.
  1090. */
  1091. static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
  1092. {
  1093. int ret;
  1094. int ridx;
  1095. int cnt = ARRAY_SIZE(rxeq_init_vals);
  1096. for (ridx = 0; ridx < cnt; ++ridx) {
  1097. int elt, reg, val, loc;
  1098. elt = rxeq_init_vals[ridx].rdesc & 0xF;
  1099. reg = rxeq_init_vals[ridx].rdesc >> 4;
  1100. loc = EPB_LOC(0, elt, reg);
  1101. val = rxeq_init_vals[ridx].rdata[vsel];
  1102. /* mask of 0xFF, because hardware does full-byte store. */
  1103. ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
  1104. if (ret < 0)
  1105. break;
  1106. }
  1107. return ret;
  1108. }
  1109. /*
  1110. * Set the default values (row 0) for DDR Driver De-emphasis.
  1111. * We do this initially and whenever we turn off IB-1.2.
  1112. * The "default" values for Rx equalization are also stored to
  1113. * SerDes registers. Formerly (and still default), we used set 2.
  1114. * For experimenting with cables and link-partners, we allow changing
  1115. * that via a module parameter.
  1116. */
  1117. static unsigned ipath_rxeq_set = 2;
  1118. module_param_named(rxeq_default_set, ipath_rxeq_set, uint,
  1119. S_IWUSR | S_IRUGO);
  1120. MODULE_PARM_DESC(rxeq_default_set,
  1121. "Which set [0..3] of Rx Equalization values is default");
  1122. static int ipath_internal_presets(struct ipath_devdata *dd)
  1123. {
  1124. int ret = 0;
  1125. ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
  1126. if (ret < 0)
  1127. ipath_dev_err(dd, "Failed to set default DDS values\n");
  1128. ret = set_rxeq_vals(dd, ipath_rxeq_set & 3);
  1129. if (ret < 0)
  1130. ipath_dev_err(dd, "Failed to set default RXEQ values\n");
  1131. return ret;
  1132. }
  1133. int ipath_sd7220_presets(struct ipath_devdata *dd)
  1134. {
  1135. int ret = 0;
  1136. if (!dd->ipath_presets_needed)
  1137. return ret;
  1138. dd->ipath_presets_needed = 0;
  1139. /* Assert uC reset, so we don't clash with it. */
  1140. ipath_ibsd_reset(dd, 1);
  1141. udelay(2);
  1142. ipath_sd_trimdone_monitor(dd, "link-down");
  1143. ret = ipath_internal_presets(dd);
  1144. return ret;
  1145. }
  1146. static int ipath_sd_trimself(struct ipath_devdata *dd, int val)
  1147. {
  1148. return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF);
  1149. }
  1150. static int ipath_sd_early(struct ipath_devdata *dd)
  1151. {
  1152. int ret = -1; /* Default failed */
  1153. int chnl;
  1154. for (chnl = 0; chnl < 4; ++chnl) {
  1155. ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF);
  1156. if (ret < 0)
  1157. goto bail;
  1158. }
  1159. for (chnl = 0; chnl < 4; ++chnl) {
  1160. ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF);
  1161. if (ret < 0)
  1162. goto bail;
  1163. }
  1164. /* more fine-tuning of what will be default */
  1165. for (chnl = 0; chnl < 4; ++chnl) {
  1166. ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF);
  1167. if (ret < 0)
  1168. goto bail;
  1169. }
  1170. for (chnl = 0; chnl < 4; ++chnl) {
  1171. ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF);
  1172. if (ret < 0)
  1173. goto bail;
  1174. }
  1175. for (chnl = 0; chnl < 4; ++chnl) {
  1176. ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF);
  1177. if (ret < 0)
  1178. goto bail;
  1179. }
  1180. bail:
  1181. return ret;
  1182. }
  1183. #define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
  1184. #define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
  1185. #define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
  1186. static int ipath_sd_dactrim(struct ipath_devdata *dd)
  1187. {
  1188. int ret = -1; /* Default failed */
  1189. int chnl;
  1190. for (chnl = 0; chnl < 4; ++chnl) {
  1191. ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF);
  1192. if (ret < 0)
  1193. goto bail;
  1194. }
  1195. for (chnl = 0; chnl < 4; ++chnl) {
  1196. ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF);
  1197. if (ret < 0)
  1198. goto bail;
  1199. }
  1200. for (chnl = 0; chnl < 4; ++chnl) {
  1201. ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF);
  1202. if (ret < 0)
  1203. goto bail;
  1204. }
  1205. /*
  1206. * delay for max possible number of steps, with slop.
  1207. * Each step is about 4usec.
  1208. */
  1209. udelay(415);
  1210. for (chnl = 0; chnl < 4; ++chnl) {
  1211. ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF);
  1212. if (ret < 0)
  1213. goto bail;
  1214. }
  1215. bail:
  1216. return ret;
  1217. }
  1218. #define RELOCK_FIRST_MS 3
  1219. #define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
  1220. void ipath_toggle_rclkrls(struct ipath_devdata *dd)
  1221. {
  1222. int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
  1223. int ret;
  1224. ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
  1225. if (ret < 0)
  1226. ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
  1227. else {
  1228. udelay(1);
  1229. ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
  1230. }
  1231. /* And again for good measure */
  1232. udelay(1);
  1233. ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
  1234. if (ret < 0)
  1235. ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
  1236. else {
  1237. udelay(1);
  1238. ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
  1239. }
  1240. /* Now reset xgxs and IBC to complete the recovery */
  1241. dd->ipath_f_xgxs_reset(dd);
  1242. }
  1243. /*
  1244. * Shut down the timer that polls for relock occasions, if needed.
  1245. * This is "hooked" from ipath_7220_quiet_serdes(), which is called
  1246. * just before ipath_shutdown_device() in ipath_driver.c shuts down all
  1247. * the other timers.
  1248. */
  1249. void ipath_shutdown_relock_poll(struct ipath_devdata *dd)
  1250. {
  1251. struct ipath_relock *irp = &dd->ipath_relock_singleton;
  1252. if (atomic_read(&irp->ipath_relock_timer_active)) {
  1253. del_timer_sync(&irp->ipath_relock_timer);
  1254. atomic_set(&irp->ipath_relock_timer_active, 0);
  1255. }
  1256. }
  1257. static unsigned ipath_relock_by_timer = 1;
  1258. module_param_named(relock_by_timer, ipath_relock_by_timer, uint,
  1259. S_IWUSR | S_IRUGO);
  1260. MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
  1261. static void ipath_run_relock(unsigned long opaque)
  1262. {
  1263. struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
  1264. struct ipath_relock *irp = &dd->ipath_relock_singleton;
  1265. u64 val, ltstate;
  1266. if (!(dd->ipath_flags & IPATH_INITTED)) {
  1267. /* Not yet up, just reenable the timer for later */
  1268. irp->ipath_relock_interval = HZ;
  1269. mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
  1270. return;
  1271. }
  1272. /*
  1273. * Check link-training state for "stuck" state.
  1274. * If found, try relock and schedule another try at an
  1275. * exponentially growing delay, maxed at one second.
  1276. * If not stuck, our work is done.
  1277. */
  1278. val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
  1279. ltstate = ipath_ib_linktrstate(dd, val);
  1280. if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT
  1281. && ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
  1282. int timeoff;
  1283. /* Not up yet. Try again, if allowed by module-param */
  1284. if (ipath_relock_by_timer) {
  1285. if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
  1286. ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n");
  1287. else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) {
  1288. ipath_cdbg(VERBOSE, "RELOCK\n");
  1289. ipath_toggle_rclkrls(dd);
  1290. }
  1291. }
  1292. /* re-set timer for next check */
  1293. timeoff = irp->ipath_relock_interval << 1;
  1294. if (timeoff > HZ)
  1295. timeoff = HZ;
  1296. irp->ipath_relock_interval = timeoff;
  1297. mod_timer(&irp->ipath_relock_timer, jiffies + timeoff);
  1298. } else {
  1299. /* Up, so no more need to check so often */
  1300. mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
  1301. }
  1302. }
  1303. void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup)
  1304. {
  1305. struct ipath_relock *irp = &dd->ipath_relock_singleton;
  1306. if (ibup > 0) {
  1307. /* we are now up, so relax timer to 1 second interval */
  1308. if (atomic_read(&irp->ipath_relock_timer_active))
  1309. mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
  1310. } else {
  1311. /* Transition to down, (re-)set timer to short interval. */
  1312. int timeout;
  1313. timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000;
  1314. if (timeout == 0)
  1315. timeout = 1;
  1316. /* If timer has not yet been started, do so. */
  1317. if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) {
  1318. init_timer(&irp->ipath_relock_timer);
  1319. irp->ipath_relock_timer.function = ipath_run_relock;
  1320. irp->ipath_relock_timer.data = (unsigned long) dd;
  1321. irp->ipath_relock_interval = timeout;
  1322. irp->ipath_relock_timer.expires = jiffies + timeout;
  1323. add_timer(&irp->ipath_relock_timer);
  1324. } else {
  1325. irp->ipath_relock_interval = timeout;
  1326. mod_timer(&irp->ipath_relock_timer, jiffies + timeout);
  1327. atomic_dec(&irp->ipath_relock_timer_active);
  1328. }
  1329. }
  1330. }