
/drivers/edac/i3200_edac.c

https://github.com/tklauser/linux-nios2
/*
 * Intel 3200/3210 Memory Controller kernel module
 * Copyright (C) 2008-2009 Akamai Technologies, Inc.
 * Portions by Hitoshi Mitake <h.mitake@gmail.com>.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/io.h>
#include "edac_module.h"

#include <linux/io-64-nonatomic-lo-hi.h>
#define I3200_REVISION		"1.1"

#define EDAC_MOD_STR		"i3200_edac"

#define PCI_DEVICE_ID_INTEL_3200_HB	0x29f0

#define I3200_DIMMS		4
#define I3200_RANKS		8
#define I3200_RANKS_PER_CHANNEL	4
#define I3200_CHANNELS		2

/* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */

#define I3200_MCHBAR_LOW	0x48	/* MCH Memory Mapped Register BAR */
#define I3200_MCHBAR_HIGH	0x4c
#define I3200_MCHBAR_MASK	0xfffffc000ULL	/* bits 35:14 */
#define I3200_MMR_WINDOW_SIZE	16384

#define I3200_TOM		0xa0	/* Top of Memory (16b)
					 *
					 * 15:10 reserved
					 *  9:0  total populated physical memory
					 */
#define I3200_TOM_MASK		0x3ff	/* bits 9:0 */
#define I3200_TOM_SHIFT		26	/* 64MiB grain */

#define I3200_ERRSTS		0xc8	/* Error Status Register (16b)
					 *
					 * 15    reserved
					 * 14    Isochronous TBWRR Run Behind FIFO Full
					 *       (ITCV)
					 * 13    Isochronous TBWRR Run Behind FIFO Put
					 *       (ITSTV)
					 * 12    reserved
					 * 11    MCH Thermal Sensor Event
					 *       for SMI/SCI/SERR (GTSE)
					 * 10    reserved
					 *  9    LOCK to non-DRAM Memory Flag (LCKF)
					 *  8    reserved
					 *  7    DRAM Throttle Flag (DTF)
					 *  6:2  reserved
					 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
					 *  0    Single-bit DRAM ECC Error Flag (DSERR)
					 */
#define I3200_ERRSTS_UE		0x0002
#define I3200_ERRSTS_CE		0x0001
#define I3200_ERRSTS_BITS	(I3200_ERRSTS_UE | I3200_ERRSTS_CE)


/* Intel MMIO register space - device 0 function 0 - MMR space */

#define I3200_C0DRB		0x200	/* Channel 0 DRAM Rank Boundary (16b x 4)
					 *
					 * 15:10 reserved
					 *  9:0  Channel 0 DRAM Rank Boundary Address
					 */
#define I3200_C1DRB		0x600	/* Channel 1 DRAM Rank Boundary (16b x 4) */
#define I3200_DRB_MASK		0x3ff	/* bits 9:0 */
#define I3200_DRB_SHIFT		26	/* 64MiB grain */

#define I3200_C0ECCERRLOG	0x280	/* Channel 0 ECC Error Log (64b)
					 *
					 * 63:48 Error Column Address (ERRCOL)
					 * 47:32 Error Row Address (ERRROW)
					 * 31:29 Error Bank Address (ERRBANK)
					 * 28:27 Error Rank Address (ERRRANK)
					 * 26:24 reserved
					 * 23:16 Error Syndrome (ERRSYND)
					 * 15: 2 reserved
					 *  1    Multiple Bit Error Status (MERRSTS)
					 *  0    Correctable Error Status (CERRSTS)
					 */
#define I3200_C1ECCERRLOG	0x680	/* Chan 1 ECC Error Log (64b) */
#define I3200_ECCERRLOG_CE	0x1
#define I3200_ECCERRLOG_UE	0x2
#define I3200_ECCERRLOG_RANK_BITS	0x18000000
#define I3200_ECCERRLOG_RANK_SHIFT	27
#define I3200_ECCERRLOG_SYNDROME_BITS	0xff0000
#define I3200_ECCERRLOG_SYNDROME_SHIFT	16
#define I3200_CAPID0		0xe0	/* P.95 of spec for details */

struct i3200_priv {
	void __iomem *window;
};

static int nr_channels;
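
/*
 * Read the 8th byte of CAPID0 from PCI config space to determine whether
 * the controller runs in single- or dual-channel mode (DCD bit), and log
 * whether two DIMMs per channel are enabled.
 */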
static int how_many_channels(struct pci_dev *pdev)
{
	int n_channels;
	unsigned char capid0_8b; /* 8th byte of CAPID0 */

	pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
	if (capid0_8b & 0x20) {	/* check DCD: Dual Channel Disable */
		edac_dbg(0, "In single channel mode\n");
		n_channels = 1;
	} else {
		edac_dbg(0, "In dual channel mode\n");
		n_channels = 2;
	}

	if (capid0_8b & 0x10) /* check if both channels are filled */
		edac_dbg(0, "2 DIMMS per channel disabled\n");
	else
		edac_dbg(0, "2 DIMMS per channel enabled\n");

	return n_channels;
}
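
/*
 * Helpers for decoding a per-channel ECCERRLOG value: extract the error
 * syndrome, and turn the logged rank into a global csrow index
 * (channel * ranks-per-channel + rank).
 */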
static unsigned long eccerrlog_syndrome(u64 log)
{
	return (log & I3200_ECCERRLOG_SYNDROME_BITS) >>
		I3200_ECCERRLOG_SYNDROME_SHIFT;
}

static int eccerrlog_row(int channel, u64 log)
{
	u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >>
		I3200_ECCERRLOG_RANK_SHIFT);

	return rank | (channel * I3200_RANKS_PER_CHANNEL);
}

enum i3200_chips {
	I3200 = 0,
};

struct i3200_dev_info {
	const char *ctl_name;
};

struct i3200_error_info {
	u16 errsts;
	u16 errsts2;
	u64 eccerrlog[I3200_CHANNELS];
};

static const struct i3200_dev_info i3200_devs[] = {
	[I3200] = {
		.ctl_name = "i3200"
	},
};

static struct pci_dev *mci_pdev;
static int i3200_registered = 1;

static void i3200_clear_error_info(struct mem_ctl_info *mci)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS,
			 I3200_ERRSTS_BITS);
}
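
/*
 * Snapshot ERRSTS and the per-channel ECC error logs, re-reading the logs
 * if a new error arrived in between, then clear the status bits so the
 * next error can be latched.
 */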
static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
					   struct i3200_error_info *info)
{
	struct pci_dev *pdev;
	struct i3200_priv *priv = mci->pvt_info;
	void __iomem *window = priv->window;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts);
	if (!(info->errsts & I3200_ERRSTS_BITS))
		return;

	info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
	if (nr_channels == 2)
		info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);

	pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2);

	/*
	 * If the error is the same for both reads then the first set
	 * of reads is valid.  If there is a change then there is a CE
	 * with no info and the second set of reads is valid and
	 * should be UE info.
	 */
	if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
		info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
		if (nr_channels == 2)
			info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
	}

	i3200_clear_error_info(mci);
}
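
/*
 * Report the captured errors to the EDAC core: a mismatch between the two
 * ERRSTS reads means a UE overwrote a CE; otherwise each channel's log is
 * decoded into a UE or CE event for its csrow.
 */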
static void i3200_process_error_info(struct mem_ctl_info *mci,
				     struct i3200_error_info *info)
{
	int channel;
	u64 log;

	if (!(info->errsts & I3200_ERRSTS_BITS))
		return;

	if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, "UE overwrote CE", "");
		info->errsts = info->errsts2;
	}

	for (channel = 0; channel < nr_channels; channel++) {
		log = info->eccerrlog[channel];
		if (log & I3200_ECCERRLOG_UE) {
			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					     0, 0, 0,
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "i3200 UE", "");
		} else if (log & I3200_ECCERRLOG_CE) {
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
					     0, 0, eccerrlog_syndrome(log),
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "i3200 CE", "");
		}
	}
}

static void i3200_check(struct mem_ctl_info *mci)
{
	struct i3200_error_info info;

	edac_dbg(1, "MC%d\n", mci->mc_idx);
	i3200_get_and_clear_error_info(mci, &info);
	i3200_process_error_info(mci, &info);
}
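
/*
 * Map the 16KiB MCH memory-mapped register window described by the
 * MCHBAR_LOW/HIGH config registers, so the DRB and ECCERRLOG registers
 * can be read through MMIO.
 */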
static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
{
	union {
		u64 mchbar;
		struct {
			u32 mchbar_low;
			u32 mchbar_high;
		};
	} u;
	void __iomem *window;

	pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low);
	pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high);
	u.mchbar &= I3200_MCHBAR_MASK;

	if (u.mchbar != (resource_size_t)u.mchbar) {
		printk(KERN_ERR
			"i3200: mmio space beyond accessible range (0x%llx)\n",
			(unsigned long long)u.mchbar);
		return NULL;
	}

	window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
	if (!window)
		printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
			(unsigned long long)u.mchbar);

	return window;
}
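
/*
 * Read the four DRAM Rank Boundary registers of each channel from the MMIO
 * window; each value is a cumulative rank boundary in 64MiB units.
 */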
static void i3200_get_drbs(void __iomem *window,
			   u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
{
	int i;

	for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
		drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
		drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;

		edac_dbg(0, "drb[0][%d] = %d, drb[1][%d] = %d\n", i, drbs[0][i], i, drbs[1][i]);
	}
}
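
/*
 * The controller is in "stacked" mode when channel 1 is mapped above
 * channel 0 rather than interleaved with it; in that case channel 1's last
 * DRAM rank boundary equals the Top of Memory register.
 */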
static bool i3200_is_stacked(struct pci_dev *pdev,
			     u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
{
	u16 tom;

	pci_read_config_word(pdev, I3200_TOM, &tom);
	tom &= I3200_TOM_MASK;

	return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom;
}
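
/*
 * Compute the number of pages in a rank from the cumulative DRB values:
 * subtract the previous rank's boundary (and, in stacked mode, channel 0's
 * memory counted in channel 1's top boundary), then scale from 64MiB units
 * to pages.
 */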
static unsigned long drb_to_nr_pages(
	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked,
	int channel, int rank)
{
	int n;

	n = drbs[channel][rank];
	if (!n)
		return 0;

	if (rank > 0)
		n -= drbs[channel][rank - 1];
	if (stacked && (channel == 1) &&
	    drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1])
		n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1];

	n <<= (I3200_DRB_SHIFT - PAGE_SHIFT);
	return n;
}
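
/*
 * Set up one memory controller instance: map the MMIO window, size each
 * rank from the DRB registers, register the mem_ctl_info with the EDAC
 * core and arm error detection.
 */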
static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc;
	int i, j;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
	bool stacked;
	void __iomem *window;
	struct i3200_priv *priv;

	edac_dbg(0, "MC:\n");

	window = i3200_map_mchbar(pdev);
	if (!window)
		return -ENODEV;

	i3200_get_drbs(window, drbs);
	nr_channels = how_many_channels(pdev);

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = I3200_DIMMS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_channels;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct i3200_priv));
	if (!mci)
		return -ENOMEM;

	edac_dbg(3, "MC: init mci\n");

	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR2;

	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = I3200_REVISION;
	mci->ctl_name = i3200_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = i3200_check;
	mci->ctl_page_to_phys = NULL;
	priv = mci->pvt_info;
	priv->window = window;

	stacked = i3200_is_stacked(pdev, drbs);

	/*
	 * The dram rank boundary (DRB) reg values are boundary addresses
	 * for each DRAM rank with a granularity of 64MB.  DRB regs are
	 * cumulative; the last one will contain the total memory
	 * contained in all ranks.
	 */
	for (i = 0; i < I3200_DIMMS; i++) {
		unsigned long nr_pages;

		for (j = 0; j < nr_channels; j++) {
			struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
							       mci->n_layers, i, j, 0);

			nr_pages = drb_to_nr_pages(drbs, stacked, j, i);
			if (nr_pages == 0)
				continue;

			edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j,
				 stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));

			dimm->nr_pages = nr_pages;
			dimm->grain = nr_pages << PAGE_SHIFT;
			dimm->mtype = MEM_DDR2;
			dimm->dtype = DEV_UNKNOWN;
			dimm->edac_mode = EDAC_UNKNOWN;
		}
	}

	i3200_clear_error_info(mci);

	rc = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
		goto fail;
	}

	/* get this far and it's successful */
	edac_dbg(3, "MC: success\n");
	return 0;

fail:
	iounmap(window);
	if (mci)
		edac_mc_free(mci);

	return rc;
}

static int i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;

	edac_dbg(0, "MC:\n");

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = i3200_probe1(pdev, ent->driver_data);
	if (!mci_pdev)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}

static void i3200_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i3200_priv *priv;

	edac_dbg(0, "\n");

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	priv = mci->pvt_info;
	iounmap(priv->window);

	edac_mc_free(mci);

	pci_disable_device(pdev);
}

static const struct pci_device_id i3200_pci_tbl[] = {
	{
		PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		I3200},
	{
		0,
	}	/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);

static struct pci_driver i3200_driver = {
	.name = EDAC_MOD_STR,
	.probe = i3200_init_one,
	.remove = i3200_remove_one,
	.id_table = i3200_pci_tbl,
};
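
/*
 * Module init: register the PCI driver and, if the probe was not invoked
 * for the host bridge (e.g. the device is already owned elsewhere), fall
 * back to locating the 3200 host bridge directly with pci_get_device()
 * and probing it by hand.
 */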
static int __init i3200_init(void)
{
	int pci_rc;

	edac_dbg(3, "MC:\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i3200_driver);
	if (pci_rc < 0)
		goto fail0;

	if (!mci_pdev) {
		i3200_registered = 0;
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					  PCI_DEVICE_ID_INTEL_3200_HB, NULL);
		if (!mci_pdev) {
			edac_dbg(0, "i3200 pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
		if (pci_rc < 0) {
			edac_dbg(0, "i3200 init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&i3200_driver);

fail0:
	pci_dev_put(mci_pdev);

	return pci_rc;
}

static void __exit i3200_exit(void)
{
	edac_dbg(3, "MC:\n");

	pci_unregister_driver(&i3200_driver);
	if (!i3200_registered) {
		i3200_remove_one(mci_pdev);
		pci_dev_put(mci_pdev);
	}
}

module_init(i3200_init);
module_exit(i3200_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies, Inc.");
MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");