
/drivers/edac/amd64_edac.c

https://bitbucket.org/cresqo/cm7-p500-kernel
C | 3087 lines | 1869 code | 520 blank | 698 comment | 305 complexity | 2296b64b5b7caa0e645455e3e5c17301 MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0

Large files are truncated; the full file is available at the repository.

   1#include "amd64_edac.h"
   2#include <asm/k8.h>
   3
   4static struct edac_pci_ctl_info *amd64_ctl_pci;
   5
   6static int report_gart_errors;
   7module_param(report_gart_errors, int, 0644);
   8
   9/*
  10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
  11 * cleared to prevent re-enabling the hardware by this driver.
  12 */
  13static int ecc_enable_override;
  14module_param(ecc_enable_override, int, 0644);
  15
  16static struct msr __percpu *msrs;
  17
  18/* Lookup table for all possible MC control instances */
  19struct amd64_pvt;
  20static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
  21static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
  22
  23/*
  24 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
  25 * later.
  26 */
  27static int ddr2_dbam_revCG[] = {
  28			   [0]		= 32,
  29			   [1]		= 64,
  30			   [2]		= 128,
  31			   [3]		= 256,
  32			   [4]		= 512,
  33			   [5]		= 1024,
  34			   [6]		= 2048,
  35};
  36
  37static int ddr2_dbam_revD[] = {
  38			   [0]		= 32,
  39			   [1]		= 64,
  40			   [2 ... 3]	= 128,
  41			   [4]		= 256,
  42			   [5]		= 512,
  43			   [6]		= 256,
  44			   [7]		= 512,
  45			   [8 ... 9]	= 1024,
  46			   [10]		= 2048,
  47};
  48
  49static int ddr2_dbam[] = { [0]		= 128,
  50			   [1]		= 256,
  51			   [2 ... 4]	= 512,
  52			   [5 ... 6]	= 1024,
  53			   [7 ... 8]	= 2048,
  54			   [9 ... 10]	= 4096,
  55			   [11]		= 8192,
  56};
  57
  58static int ddr3_dbam[] = { [0]		= -1,
  59			   [1]		= 256,
  60			   [2]		= 512,
  61			   [3 ... 4]	= -1,
  62			   [5 ... 6]	= 1024,
  63			   [7 ... 8]	= 2048,
  64			   [9 ... 10]	= 4096,
  65			   [11]	= 8192,
  66};
  67
  68/*
  69 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
  70 * bandwidth to a valid bit pattern. The 'set' operation picks the closest
  71 * bandwidth value that does not exceed the requested one.
  72 *
  73 * FIXME: Produce a better mapping/linearisation.
  74 */
  75
  76struct scrubrate scrubrates[] = {
  77	{ 0x01, 1600000000UL},
  78	{ 0x02, 800000000UL},
  79	{ 0x03, 400000000UL},
  80	{ 0x04, 200000000UL},
  81	{ 0x05, 100000000UL},
  82	{ 0x06, 50000000UL},
  83	{ 0x07, 25000000UL},
  84	{ 0x08, 12284069UL},
  85	{ 0x09, 6274509UL},
  86	{ 0x0A, 3121951UL},
  87	{ 0x0B, 1560975UL},
  88	{ 0x0C, 781440UL},
  89	{ 0x0D, 390720UL},
  90	{ 0x0E, 195300UL},
  91	{ 0x0F, 97650UL},
  92	{ 0x10, 48854UL},
  93	{ 0x11, 24427UL},
  94	{ 0x12, 12213UL},
  95	{ 0x13, 6101UL},
  96	{ 0x14, 3051UL},
  97	{ 0x15, 1523UL},
  98	{ 0x16, 761UL},
  99	{ 0x00, 0UL},        /* scrubbing off */
 100};
 101
 102/*
 103 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 104 * hardware and can involve L2 cache, dcache as well as the main memory. With
 105 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 106 * functionality.
 107 *
 108 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 109 * (DRAM) to cache lines. This is nasty, so we will use bandwidth in
 110 * bytes/sec for the setting.
 111 *
 112 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 113 * other archs, we might not have access to the caches directly.
 114 */
 115
 116/*
 117 * Scan the scrub rate mapping table for a close or matching bandwidth value
 118 * to issue. If the requested rate is too big, use the last maximum value found.
 119 */
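/*
 * Worked example of the search below (ignoring the min_scrubrate filter):
 * a request of new_bw = 5000000 bytes/sec walks past 6274509 (scrubval 0x09)
 * and stops at 3121951 (scrubval 0x0A), the closest rate not above the
 * request, so 0x0A is what ends up in the low five bits of F3x58.
 */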
 120static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
 121				       u32 min_scrubrate)
 122{
 123	u32 scrubval;
 124	int i;
 125
 126	/*
 127	 * map the configured rate (new_bw) to a value specific to the AMD64
 128	 * memory controller and apply to the register. Search for the first
 129	 * bandwidth entry that does not exceed the requested setting and program
 130	 * that. If we end up at the last entry, turn off DRAM scrubbing.
 131	 */
 132	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
 133		/*
 134		 * skip scrub rates which aren't recommended
 135		 * (see F10 BKDG, F3x58)
 136		 */
 137		if (scrubrates[i].scrubval < min_scrubrate)
 138			continue;
 139
 140		if (scrubrates[i].bandwidth <= new_bw)
 141			break;
 142
 143		/*
 144		 * if no suitable bandwidth found, turn off DRAM scrubbing
 145		 * entirely by falling back to the last element in the
 146		 * scrubrates array.
 147		 */
 148	}
 149
 150	scrubval = scrubrates[i].scrubval;
 151	if (scrubval)
 152		edac_printk(KERN_DEBUG, EDAC_MC,
 153			    "Setting scrub rate bandwidth: %u\n",
 154			    scrubrates[i].bandwidth);
 155	else
 156		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
 157
 158	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
 159
 160	return 0;
 161}
 162
 163static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
 164{
 165	struct amd64_pvt *pvt = mci->pvt_info;
 166	u32 min_scrubrate = 0x0;
 167
 168	switch (boot_cpu_data.x86) {
 169	case 0xf:
 170		min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
 171		break;
 172	case 0x10:
 173		min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
 174		break;
 175	case 0x11:
 176		min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
 177		break;
 178
 179	default:
 180		amd64_printk(KERN_ERR, "Unsupported family!\n");
 181		return -EINVAL;
 182	}
 183	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
 184			min_scrubrate);
 185}
 186
 187static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 188{
 189	struct amd64_pvt *pvt = mci->pvt_info;
 190	u32 scrubval = 0;
 191	int status = -1, i;
 192
 193	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
 194
 195	scrubval = scrubval & 0x001F;
 196
 197	edac_printk(KERN_DEBUG, EDAC_MC,
 198		    "pci-read, sdram scrub control value: %d \n", scrubval);
 199
 200	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
 201		if (scrubrates[i].scrubval == scrubval) {
 202			*bw = scrubrates[i].bandwidth;
 203			status = 0;
 204			break;
 205		}
 206	}
 207
 208	return status;
 209}
 210
 211/* Map from a CSROW entry to the mask entry that operates on it */
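/*
 * On K8 revisions prior to Rev F each csrow has its own DCSM register; on
 * Rev F and later (and on Fam10h/11h) two csrows share one mask, so csrows
 * 0/1 use DCSM0, csrows 2/3 use DCSM1, and so on.
 */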
 212static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
 213{
 214	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
 215		return csrow;
 216	else
 217		return csrow >> 1;
 218}
 219
  220/* Return the 'base' address for the i'th CS entry of the 'dct' DRAM controller */
 221static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
 222{
 223	if (dct == 0)
 224		return pvt->dcsb0[csrow];
 225	else
 226		return pvt->dcsb1[csrow];
 227}
 228
 229/*
  230 * Return the 'mask' address for the i'th CS entry. This function is needed because
  231 * the number of DCSM registers on Rev E and prior vs Rev F and later is
 232 * different.
 233 */
 234static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
 235{
 236	if (dct == 0)
 237		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
 238	else
 239		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
 240}
 241
 242
 243/*
 244 * In *base and *limit, pass back the full 40-bit base and limit physical
 245 * addresses for the node given by node_id.  This information is obtained from
 246 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
 247 * base and limit addresses are of type SysAddr, as defined at the start of
 248 * section 3.4.4 (p. 70).  They are the lowest and highest physical addresses
 249 * in the address range they represent.
 250 */
 251static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
 252			       u64 *base, u64 *limit)
 253{
 254	*base = pvt->dram_base[node_id];
 255	*limit = pvt->dram_limit[node_id];
 256}
 257
 258/*
 259 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
 260 * with node_id
 261 */
 262static int amd64_base_limit_match(struct amd64_pvt *pvt,
 263					u64 sys_addr, int node_id)
 264{
 265	u64 base, limit, addr;
 266
 267	amd64_get_base_and_limit(pvt, node_id, &base, &limit);
 268
 269	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
 270	 * all ones if the most significant implemented address bit is 1.
 271	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
 272	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
 273	 * Application Programming.
 274	 */
 275	addr = sys_addr & 0x000000ffffffffffull;
 276
 277	return (addr >= base) && (addr <= limit);
 278}
 279
 280/*
 281 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 282 * mem_ctl_info structure for the node that the SysAddr maps to.
 283 *
 284 * On failure, return NULL.
 285 */
 286static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 287						u64 sys_addr)
 288{
 289	struct amd64_pvt *pvt;
 290	int node_id;
 291	u32 intlv_en, bits;
 292
 293	/*
 294	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
 295	 * 3.4.4.2) registers to map the SysAddr to a node ID.
 296	 */
 297	pvt = mci->pvt_info;
 298
 299	/*
 300	 * The value of this field should be the same for all DRAM Base
 301	 * registers.  Therefore we arbitrarily choose to read it from the
 302	 * register for node 0.
 303	 */
 304	intlv_en = pvt->dram_IntlvEn[0];
 305
 306	if (intlv_en == 0) {
 307		for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
 308			if (amd64_base_limit_match(pvt, sys_addr, node_id))
 309				goto found;
 310		}
 311		goto err_no_match;
 312	}
 313
 314	if (unlikely((intlv_en != 0x01) &&
 315		     (intlv_en != 0x03) &&
 316		     (intlv_en != 0x07))) {
 317		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
 318			     "IntlvEn field of DRAM Base Register for node 0: "
 319			     "this probably indicates a BIOS bug.\n", intlv_en);
 320		return NULL;
 321	}
 322
 323	bits = (((u32) sys_addr) >> 12) & intlv_en;
 324
 325	for (node_id = 0; ; ) {
 326		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
 327			break;	/* intlv_sel field matches */
 328
 329		if (++node_id >= DRAM_REG_COUNT)
 330			goto err_no_match;
 331	}
 332
 333	/* sanity test for sys_addr */
 334	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
 335		amd64_printk(KERN_WARNING,
 336			     "%s(): sys_addr 0x%llx falls outside base/limit "
 337			     "address range for node %d with node interleaving "
 338			     "enabled.\n",
 339			     __func__, sys_addr, node_id);
 340		return NULL;
 341	}
 342
 343found:
 344	return edac_mc_find(node_id);
 345
 346err_no_match:
 347	debugf2("sys_addr 0x%lx doesn't match any node\n",
 348		(unsigned long)sys_addr);
 349
 350	return NULL;
 351}
 352
 353/*
 354 * Extract the DRAM CS base address from selected csrow register.
 355 */
 356static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
 357{
 358	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
 359				pvt->dcs_shift;
 360}
 361
 362/*
 363 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
 364 */
 365static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
 366{
 367	u64 dcsm_bits, other_bits;
 368	u64 mask;
 369
 370	/* Extract bits from DRAM CS Mask. */
 371	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
 372
 373	other_bits = pvt->dcsm_mask;
 374	other_bits = ~(other_bits << pvt->dcs_shift);
 375
 376	/*
 377	 * The extracted bits from DCSM belong in the spaces represented by
 378	 * the cleared bits in other_bits.
 379	 */
 380	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
 381
 382	return mask;
 383}
 384
 385/*
 386 * @input_addr is an InputAddr associated with the node given by mci. Return the
 387 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 388 */
 389static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
 390{
 391	struct amd64_pvt *pvt;
 392	int csrow;
 393	u64 base, mask;
 394
 395	pvt = mci->pvt_info;
 396
 397	/*
 398	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
 399	 * base/mask register pair, test the condition shown near the start of
 400	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
 401	 */
 402	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
 403
 404		/* This DRAM chip select is disabled on this node */
 405		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
 406			continue;
 407
 408		base = base_from_dct_base(pvt, csrow);
 409		mask = ~mask_from_dct_mask(pvt, csrow);
 410
 411		if ((input_addr & mask) == (base & mask)) {
 412			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
 413				(unsigned long)input_addr, csrow,
 414				pvt->mc_node_id);
 415
 416			return csrow;
 417		}
 418	}
 419
 420	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
 421		(unsigned long)input_addr, pvt->mc_node_id);
 422
 423	return -1;
 424}
 425
 426/*
 427 * Return the base value defined by the DRAM Base register for the node
 428 * represented by mci.  This function returns the full 40-bit value despite the
 429 * fact that the register only stores bits 39-24 of the value. See section
 430 * 3.4.4.1 (BKDG #26094, K8, revA-E)
 431 */
 432static inline u64 get_dram_base(struct mem_ctl_info *mci)
 433{
 434	struct amd64_pvt *pvt = mci->pvt_info;
 435
 436	return pvt->dram_base[pvt->mc_node_id];
 437}
 438
 439/*
 440 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 441 * for the node represented by mci. Info is passed back in *hole_base,
 442 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 443 * info is invalid. Info may be invalid for either of the following reasons:
 444 *
 445 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 446 *   Address Register does not exist.
 447 *
 448 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 449 *   indicating that its contents are not valid.
 450 *
 451 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 452 * complete 32-bit values despite the fact that the bitfields in the DHAR
 453 * only represent bits 31-24 of the base and offset values.
 454 */
 455int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 456			     u64 *hole_offset, u64 *hole_size)
 457{
 458	struct amd64_pvt *pvt = mci->pvt_info;
 459	u64 base;
 460
 461	/* only revE and later have the DRAM Hole Address Register */
 462	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
 463		debugf1("  revision %d for node %d does not support DHAR\n",
 464			pvt->ext_model, pvt->mc_node_id);
 465		return 1;
 466	}
 467
 468	/* only valid for Fam10h */
 469	if (boot_cpu_data.x86 == 0x10 &&
 470	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
 471		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
 472		return 1;
 473	}
 474
 475	if ((pvt->dhar & DHAR_VALID) == 0) {
 476		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
 477			pvt->mc_node_id);
 478		return 1;
 479	}
 480
 481	/* This node has Memory Hoisting */
 482
 483	/* +------------------+--------------------+--------------------+-----
 484	 * | memory           | DRAM hole          | relocated          |
 485	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
 486	 * |                  |                    | DRAM hole          |
 487	 * |                  |                    | [0x100000000,      |
 488	 * |                  |                    |  (0x100000000+     |
 489	 * |                  |                    |   (0xffffffff-x))] |
 490	 * +------------------+--------------------+--------------------+-----
 491	 *
 492	 * Above is a diagram of physical memory showing the DRAM hole and the
 493	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
 494	 * starts at address x (the base address) and extends through address
 495	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
 496	 * addresses in the hole so that they start at 0x100000000.
 497	 */
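	/*
	 * Numeric illustration (hypothetical DHAR contents): if the base
	 * field yields x = 0xC0000000, then hole_base = 0xC0000000 and
	 * hole_size = 0x100000000 - 0xC0000000 = 0x40000000, i.e. the top
	 * 1 GB below 4 GB is hoisted to start at 0x100000000.
	 */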
 498
 499	base = dhar_base(pvt->dhar);
 500
 501	*hole_base = base;
 502	*hole_size = (0x1ull << 32) - base;
 503
 504	if (boot_cpu_data.x86 > 0xf)
 505		*hole_offset = f10_dhar_offset(pvt->dhar);
 506	else
 507		*hole_offset = k8_dhar_offset(pvt->dhar);
 508
 509	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
 510		pvt->mc_node_id, (unsigned long)*hole_base,
 511		(unsigned long)*hole_offset, (unsigned long)*hole_size);
 512
 513	return 0;
 514}
 515EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
 516
 517/*
 518 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 519 * assumed that sys_addr maps to the node given by mci.
 520 *
 521 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 522 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 523 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 524 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 525 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 526 * These parts of the documentation are unclear. I interpret them as follows:
 527 *
 528 * When node n receives a SysAddr, it processes the SysAddr as follows:
 529 *
 530 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 531 *    Limit registers for node n. If the SysAddr is not within the range
 532 *    specified by the base and limit values, then node n ignores the Sysaddr
 533 *    (since it does not map to node n). Otherwise continue to step 2 below.
 534 *
 535 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 536 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 537 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 538 *    hole. If not, skip to step 3 below. Else get the value of the
 539 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 540 *    offset defined by this value from the SysAddr.
 541 *
 542 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 543 *    Base register for node n. To obtain the DramAddr, subtract the base
 544 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 545 */
 546static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
 547{
 548	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
 549	int ret = 0;
 550
 551	dram_base = get_dram_base(mci);
 552
 553	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
 554				      &hole_size);
 555	if (!ret) {
 556		if ((sys_addr >= (1ull << 32)) &&
 557		    (sys_addr < ((1ull << 32) + hole_size))) {
 558			/* use DHAR to translate SysAddr to DramAddr */
 559			dram_addr = sys_addr - hole_offset;
 560
 561			debugf2("using DHAR to translate SysAddr 0x%lx to "
 562				"DramAddr 0x%lx\n",
 563				(unsigned long)sys_addr,
 564				(unsigned long)dram_addr);
 565
 566			return dram_addr;
 567		}
 568	}
 569
 570	/*
 571	 * Translate the SysAddr to a DramAddr as shown near the start of
 572	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
 573	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
 574	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
 575	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
 576	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
 577	 * Programmer's Manual Volume 1 Application Programming.
 578	 */
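	/*
	 * E.g. (hypothetical values): with this node's DRAM base at
	 * 0x100000000 and sys_addr = 0x123456789 (and no DRAM hole in play),
	 * dram_addr = 0x123456789 - 0x100000000 = 0x23456789.
	 */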
 579	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
 580
 581	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
 582		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
 583		(unsigned long)dram_addr);
 584	return dram_addr;
 585}
 586
 587/*
 588 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 589 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 590 * for node interleaving.
 591 */
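/*
 * IntlvEn acts as a mask over SysAddr bits starting at bit 12: 0 means no
 * node interleaving, 1 -> 1 bit (2 nodes), 3 -> 2 bits (4 nodes), 7 -> 3
 * bits (8 nodes).  Other values are treated as invalid and map to 0 here.
 */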
 592static int num_node_interleave_bits(unsigned intlv_en)
 593{
 594	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
 595	int n;
 596
 597	BUG_ON(intlv_en > 7);
 598	n = intlv_shift_table[intlv_en];
 599	return n;
 600}
 601
 602/* Translate the DramAddr given by @dram_addr to an InputAddr. */
 603static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
 604{
 605	struct amd64_pvt *pvt;
 606	int intlv_shift;
 607	u64 input_addr;
 608
 609	pvt = mci->pvt_info;
 610
 611	/*
 612	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
 613	 * concerning translating a DramAddr to an InputAddr.
 614	 */
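	/*
	 * For example, with intlv_shift == 1 a DramAddr of 0x12345678 maps
	 * to InputAddr ((0x12345678 >> 1) & 0xffffff000) + 0x678 = 0x91a2678.
	 */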
 615	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
 616	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
 617	    (dram_addr & 0xfff);
 618
 619	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
 620		intlv_shift, (unsigned long)dram_addr,
 621		(unsigned long)input_addr);
 622
 623	return input_addr;
 624}
 625
 626/*
 627 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 628 * assumed that @sys_addr maps to the node given by mci.
 629 */
 630static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
 631{
 632	u64 input_addr;
 633
 634	input_addr =
 635	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
 636
  637	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
 638		(unsigned long)sys_addr, (unsigned long)input_addr);
 639
 640	return input_addr;
 641}
 642
 643
 644/*
 645 * @input_addr is an InputAddr associated with the node represented by mci.
 646 * Translate @input_addr to a DramAddr and return the result.
 647 */
 648static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
 649{
 650	struct amd64_pvt *pvt;
 651	int node_id, intlv_shift;
 652	u64 bits, dram_addr;
 653	u32 intlv_sel;
 654
 655	/*
  656	 * The start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
 657	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
 658	 * this procedure. When translating from a DramAddr to an InputAddr, the
 659	 * bits used for node interleaving are discarded.  Here we recover these
 660	 * bits from the IntlvSel field of the DRAM Limit register (section
 661	 * 3.4.4.2) for the node that input_addr is associated with.
 662	 */
 663	pvt = mci->pvt_info;
 664	node_id = pvt->mc_node_id;
 665	BUG_ON((node_id < 0) || (node_id > 7));
 666
 667	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
 668
 669	if (intlv_shift == 0) {
 670		debugf1("    InputAddr 0x%lx translates to DramAddr of "
 671			"same value\n",	(unsigned long)input_addr);
 672
 673		return input_addr;
 674	}
 675
 676	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
 677	    (input_addr & 0xfff);
 678
 679	intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
 680	dram_addr = bits + (intlv_sel << 12);
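	/*
	 * Continuing the example from dram_addr_to_input_addr(): with
	 * intlv_shift == 1, input_addr == 0x91a2678 and this node's IntlvSel
	 * bit set, bits = (0x91a2000 << 1) + 0x678 = 0x12344678 and
	 * dram_addr = 0x12344678 + (1 << 12) = 0x12345678.
	 */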
 681
 682	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
 683		"(%d node interleave bits)\n", (unsigned long)input_addr,
 684		(unsigned long)dram_addr, intlv_shift);
 685
 686	return dram_addr;
 687}
 688
 689/*
 690 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 691 * @dram_addr to a SysAddr.
 692 */
 693static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
 694{
 695	struct amd64_pvt *pvt = mci->pvt_info;
 696	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
 697	int ret = 0;
 698
 699	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
 700				      &hole_size);
 701	if (!ret) {
 702		if ((dram_addr >= hole_base) &&
 703		    (dram_addr < (hole_base + hole_size))) {
 704			sys_addr = dram_addr + hole_offset;
 705
 706			debugf1("using DHAR to translate DramAddr 0x%lx to "
 707				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
 708				(unsigned long)sys_addr);
 709
 710			return sys_addr;
 711		}
 712	}
 713
 714	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
 715	sys_addr = dram_addr + base;
 716
 717	/*
 718	 * The sys_addr we have computed up to this point is a 40-bit value
 719	 * because the k8 deals with 40-bit values.  However, the value we are
 720	 * supposed to return is a full 64-bit physical address.  The AMD
 721	 * x86-64 architecture specifies that the most significant implemented
 722	 * address bit through bit 63 of a physical address must be either all
 723	 * 0s or all 1s.  Therefore we sign-extend the 40-bit sys_addr to a
 724	 * 64-bit value below.  See section 3.4.2 of AMD publication 24592:
 725	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
 726	 * Programming.
 727	 */
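	/*
	 * Concretely: if bit 39 is set, (sys_addr & (1ull << 39)) - 1 has
	 * bits [38:0] set, so its complement has bits [63:39] set and the OR
	 * below fills bits [63:40] with ones.  If bit 39 is clear, the
	 * expression evaluates to ~(~0ull) == 0 and sys_addr is unchanged.
	 */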
 728	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
 729
 730	debugf1("    Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
 731		pvt->mc_node_id, (unsigned long)dram_addr,
 732		(unsigned long)sys_addr);
 733
 734	return sys_addr;
 735}
 736
 737/*
 738 * @input_addr is an InputAddr associated with the node given by mci. Translate
 739 * @input_addr to a SysAddr.
 740 */
 741static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
 742					 u64 input_addr)
 743{
 744	return dram_addr_to_sys_addr(mci,
 745				     input_addr_to_dram_addr(mci, input_addr));
 746}
 747
 748/*
 749 * Find the minimum and maximum InputAddr values that map to the given @csrow.
 750 * Pass back these values in *input_addr_min and *input_addr_max.
 751 */
 752static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
 753			      u64 *input_addr_min, u64 *input_addr_max)
 754{
 755	struct amd64_pvt *pvt;
 756	u64 base, mask;
 757
 758	pvt = mci->pvt_info;
 759	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
 760
 761	base = base_from_dct_base(pvt, csrow);
 762	mask = mask_from_dct_mask(pvt, csrow);
 763
 764	*input_addr_min = base & ~mask;
 765	*input_addr_max = base | mask | pvt->dcs_mask_notused;
 766}
 767
 768/* Map the Error address to a PAGE and PAGE OFFSET. */
 769static inline void error_address_to_page_and_offset(u64 error_address,
 770						    u32 *page, u32 *offset)
 771{
 772	*page = (u32) (error_address >> PAGE_SHIFT);
 773	*offset = ((u32) error_address) & ~PAGE_MASK;
 774}
 775
 776/*
 777 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 778 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 779 * of a node that detected an ECC memory error.  mci represents the node that
 780 * the error address maps to (possibly different from the node that detected
 781 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 782 * error.
 783 */
 784static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 785{
 786	int csrow;
 787
 788	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
 789
 790	if (csrow == -1)
 791		amd64_mc_printk(mci, KERN_ERR,
 792			     "Failed to translate InputAddr to csrow for "
 793			     "address 0x%lx\n", (unsigned long)sys_addr);
 794	return csrow;
 795}
 796
 797static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
 798
 799static void amd64_cpu_display_info(struct amd64_pvt *pvt)
 800{
 801	if (boot_cpu_data.x86 == 0x11)
 802		edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
 803	else if (boot_cpu_data.x86 == 0x10)
 804		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
 805	else if (boot_cpu_data.x86 == 0xf)
 806		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
 807			(pvt->ext_model >= K8_REV_F) ?
 808			"Rev F or later" : "Rev E or earlier");
 809	else
  810		/* we'll hardly ever get here */
 811		edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
 812}
 813
 814/*
 815 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 816 * are ECC capable.
 817 */
 818static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
 819{
 820	int bit;
  821	enum edac_type edac_cap = EDAC_FLAG_NONE;
 822
 823	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
 824		? 19
 825		: 17;
 826
 827	if (pvt->dclr0 & BIT(bit))
 828		edac_cap = EDAC_FLAG_SECDED;
 829
 830	return edac_cap;
 831}
 832
 833
 834static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
 835
 836static void amd64_dump_dramcfg_low(u32 dclr, int chan)
 837{
 838	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
 839
 840	debugf1("  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
 841		(dclr & BIT(16)) ?  "un" : "",
 842		(dclr & BIT(19)) ? "yes" : "no");
 843
 844	debugf1("  PAR/ERR parity: %s\n",
 845		(dclr & BIT(8)) ?  "enabled" : "disabled");
 846
 847	debugf1("  DCT 128bit mode width: %s\n",
 848		(dclr & BIT(11)) ?  "128b" : "64b");
 849
 850	debugf1("  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
 851		(dclr & BIT(12)) ?  "yes" : "no",
 852		(dclr & BIT(13)) ?  "yes" : "no",
 853		(dclr & BIT(14)) ?  "yes" : "no",
 854		(dclr & BIT(15)) ?  "yes" : "no");
 855}
 856
 857/* Display and decode various NB registers for debug purposes. */
 858static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 859{
 860	int ganged;
 861
 862	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 863
 864	debugf1("  NB two channel DRAM capable: %s\n",
 865		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
 866
 867	debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
 868		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
 869		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
 870
 871	amd64_dump_dramcfg_low(pvt->dclr0, 0);
 872
 873	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
 874
 875	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
 876			"offset: 0x%08x\n",
 877			pvt->dhar,
 878			dhar_base(pvt->dhar),
 879			(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
 880						   : f10_dhar_offset(pvt->dhar));
 881
 882	debugf1("  DramHoleValid: %s\n",
 883		(pvt->dhar & DHAR_VALID) ? "yes" : "no");
 884
 885	/* everything below this point is Fam10h and above */
 886	if (boot_cpu_data.x86 == 0xf) {
 887		amd64_debug_display_dimm_sizes(0, pvt);
 888		return;
 889	}
 890
 891	/* Only if NOT ganged does dclr1 have valid info */
 892	if (!dct_ganging_enabled(pvt))
 893		amd64_dump_dramcfg_low(pvt->dclr1, 1);
 894
 895	/*
 896	 * Determine if ganged and then dump memory sizes for first controller,
 897	 * and if NOT ganged dump info for 2nd controller.
 898	 */
 899	ganged = dct_ganging_enabled(pvt);
 900
 901	amd64_debug_display_dimm_sizes(0, pvt);
 902
 903	if (!ganged)
 904		amd64_debug_display_dimm_sizes(1, pvt);
 905}
 906
  907/* Read in both of the DBAM registers */
 908static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 909{
 910	amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
 911
 912	if (boot_cpu_data.x86 >= 0x10)
 913		amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
 914}
 915
 916/*
 917 * NOTE: CPU Revision Dependent code: Rev E and Rev F
 918 *
 919 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
 920 * set the shift factor for the DCSB and DCSM values.
 921 *
 922 * ->dcs_mask_notused, RevE:
 923 *
 924 * To find the max InputAddr for the csrow, start with the base address and set
 925 * all bits that are "don't care" bits in the test at the start of section
 926 * 3.5.4 (p. 84).
 927 *
 928 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 929 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
 930 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
 931 * gaps.
 932 *
 933 * ->dcs_mask_notused, RevF and later:
 934 *
 935 * To find the max InputAddr for the csrow, start with the base address and set
 936 * all bits that are "don't care" bits in the test at the start of NPT section
 937 * 4.5.4 (p. 87).
 938 *
 939 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 940 * between bit ranges [36:27] and [21:13].
 941 *
 942 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
 943 * which are all bits in the above-mentioned gaps.
 944 */
 945static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 946{
 947
 948	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
 949		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
 950		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
 951		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
 952		pvt->dcs_shift		= REV_E_DCS_SHIFT;
 953		pvt->cs_count		= 8;
 954		pvt->num_dcsm		= 8;
 955	} else {
 956		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
 957		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
 958		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
 959		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
 960
 961		if (boot_cpu_data.x86 == 0x11) {
 962			pvt->cs_count = 4;
 963			pvt->num_dcsm = 2;
 964		} else {
 965			pvt->cs_count = 8;
 966			pvt->num_dcsm = 4;
 967		}
 968	}
 969}
 970
 971/*
 972 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
 973 */
 974static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 975{
 976	int cs, reg;
 977
 978	amd64_set_dct_base_and_mask(pvt);
 979
 980	for (cs = 0; cs < pvt->cs_count; cs++) {
 981		reg = K8_DCSB0 + (cs * 4);
 982		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
 983			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
 984				cs, pvt->dcsb0[cs], reg);
 985
 986		/* If DCT are NOT ganged, then read in DCT1's base */
 987		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 988			reg = F10_DCSB1 + (cs * 4);
 989			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
 990						&pvt->dcsb1[cs]))
 991				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
 992					cs, pvt->dcsb1[cs], reg);
 993		} else {
 994			pvt->dcsb1[cs] = 0;
 995		}
 996	}
 997
 998	for (cs = 0; cs < pvt->num_dcsm; cs++) {
 999		reg = K8_DCSM0 + (cs * 4);
1000		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
1001			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
1002				cs, pvt->dcsm0[cs], reg);
1003
1004		/* If DCT are NOT ganged, then read in DCT1's mask */
1005		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
1006			reg = F10_DCSM1 + (cs * 4);
1007			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
1008						&pvt->dcsm1[cs]))
1009				debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
1010					cs, pvt->dcsm1[cs], reg);
1011		} else {
1012			pvt->dcsm1[cs] = 0;
1013		}
1014	}
1015}
1016
1017static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
1018{
1019	enum mem_type type;
1020
1021	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
1022		if (pvt->dchr0 & DDR3_MODE)
1023			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
1024		else
1025			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1026	} else {
1027		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1028	}
1029
1030	debugf1("  Memory type is: %s\n", edac_mem_types[type]);
1031
1032	return type;
1033}
1034
1035/*
1036 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
1037 * and the later RevF memory controllers (DDR vs DDR2)
1038 *
1039 * Return:
1040 *      number of memory channels in operation
1041 * Pass back:
1042 *      contents of the DCL0_LOW register
1043 */
1044static int k8_early_channel_count(struct amd64_pvt *pvt)
1045{
1046	int flag, err = 0;
1047
1048	err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1049	if (err)
1050		return err;
1051
1052	if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) {
1053		/* RevF (NPT) and later */
1054		flag = pvt->dclr0 & F10_WIDTH_128;
1055	} else {
1056		/* RevE and earlier */
1057		flag = pvt->dclr0 & REVE_WIDTH_128;
1058	}
1059
1060	/* not used */
1061	pvt->dclr1 = 0;
1062
1063	return (flag) ? 2 : 1;
1064}
1065
1066/* extract the ERROR ADDRESS for the K8 CPUs */
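/*
 * nbeah[7:0] supplies address bits [39:32] and nbeal, with its low two bits
 * masked off, supplies bits [31:2], forming the 40-bit K8 error address.
 */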
1067static u64 k8_get_error_address(struct mem_ctl_info *mci,
1068				struct err_regs *info)
1069{
1070	return (((u64) (info->nbeah & 0xff)) << 32) +
1071			(info->nbeal & ~0x03);
1072}
1073
1074/*
1075 * Read the Base and Limit registers for K8 based Memory controllers; extract
1076 * fields from the 'raw' reg into separate data fields
1077 *
1078 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1079 */
1080static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1081{
1082	u32 low;
1083	u32 off = dram << 3;	/* 8 bytes between DRAM entries */
1084
1085	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
1086
1087	/* Extract parts into separate data entries */
1088	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1089	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1090	pvt->dram_rw_en[dram] = (low & 0x3);
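	/*
	 * E.g. (hypothetical register value): low = 0x00100003 gives
	 * dram_base = (0x00100000 << 8) = 0x10000000 (256 MB), IntlvEn = 0
	 * and read/write access enabled (rw_en = 0x3).
	 */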
1091
1092	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
1093
1094	/*
1095	 * Extract parts into separate data entries. Limit is the HIGHEST memory
1096	 * location of the region, so lower 24 bits need to be all ones
1097	 */
1098	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1099	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1100	pvt->dram_DstNode[dram] = (low & 0x7);
1101}
1102
1103static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1104					struct err_regs *info,
1105					u64 sys_addr)
1106{
1107	struct mem_ctl_info *src_mci;
1108	unsigned short syndrome;
1109	int channel, csrow;
1110	u32 page, offset;
1111
1112	/* Extract the syndrome parts and form a 16-bit syndrome */
1113	syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
1114	syndrome |= LOW_SYNDROME(info->nbsh);
1115
1116	/* CHIPKILL enabled */
1117	if (info->nbcfg & K8_NBCFG_CHIPKILL) {
1118		channel = get_channel_from_ecc_syndrome(mci, syndrome);
1119		if (channel < 0) {
1120			/*
1121			 * Syndrome didn't map, so we don't know which of the
1122			 * 2 DIMMs is in error. So we need to ID 'both' of them
1123			 * as suspect.
1124			 */
1125			amd64_mc_printk(mci, KERN_WARNING,
1126				       "unknown syndrome 0x%x - possible error "
1127				       "reporting race\n", syndrome);
1128			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1129			return;
1130		}
1131	} else {
1132		/*
1133		 * non-chipkill ecc mode
1134		 *
1135		 * The k8 documentation is unclear about how to determine the
1136		 * channel number when using non-chipkill memory.  This method
1137		 * was obtained from email communication with someone at AMD.
1138		 * (Wish the email was placed in this comment - norsk)
1139		 */
1140		channel = ((sys_addr & BIT(3)) != 0);
1141	}
1142
1143	/*
1144	 * Find out which node the error address belongs to. This may be
1145	 * different from the node that detected the error.
1146	 */
1147	src_mci = find_mc_by_sys_addr(mci, sys_addr);
1148	if (!src_mci) {
1149		amd64_mc_printk(mci, KERN_ERR,
1150			     "failed to map error address 0x%lx to a node\n",
1151			     (unsigned long)sys_addr);
1152		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1153		return;
1154	}
1155
1156	/* Now map the sys_addr to a CSROW */
1157	csrow = sys_addr_to_csrow(src_mci, sys_addr);
1158	if (csrow < 0) {
1159		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1160	} else {
1161		error_address_to_page_and_offset(sys_addr, &page, &offset);
1162
1163		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1164				  channel, EDAC_MOD_STR);
1165	}
1166}
1167
1168static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1169{
1170	int *dbam_map;
1171
1172	if (pvt->ext_model >= K8_REV_F)
1173		dbam_map = ddr2_dbam;
1174	else if (pvt->ext_model >= K8_REV_D)
1175		dbam_map = ddr2_dbam_revD;
1176	else
1177		dbam_map = ddr2_dbam_revCG;
1178
1179	return dbam_map[cs_mode];
1180}
1181
1182/*
1183 * Get the number of DCT channels in use.
1184 *
1185 * Return:
1186 *	number of Memory Channels in operation
1187 * Pass back:
1188 *	contents of the DCL0_LOW register
1189 */
1190static int f10_early_channel_count(struct amd64_pvt *pvt)
1191{
1192	int dbams[] = { DBAM0, DBAM1 };
1193	int i, j, channels = 0;
1194	u32 dbam;
1195
1196	/* If we are in 128 bit mode, then we are using 2 channels */
1197	if (pvt->dclr0 & F10_WIDTH_128) {
1198		channels = 2;
1199		return channels;
1200	}
1201
1202	/*
 1203	 * Need to check if in unganged mode: in that case there are 2 channels,
1204	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1205	 * bit will be OFF.
1206	 *
1207	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
 1208	 * its CSEnable bit on. If so, then it is the SINGLE DIMM case.
1209	 */
1210	debugf0("Data width is not 128 bits - need more decoding\n");
1211
1212	/*
1213	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1214	 * is more than just one DIMM present in unganged mode. Need to check
1215	 * both controllers since DIMMs can be placed in either one.
1216	 */
1217	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
1218		if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
1219			goto err_reg;
1220
1221		for (j = 0; j < 4; j++) {
1222			if (DBAM_DIMM(j, dbam) > 0) {
1223				channels++;
1224				break;
1225			}
1226		}
1227	}
1228
1229	if (channels > 2)
1230		channels = 2;
1231
1232	debugf0("MCT channel count: %d\n", channels);
1233
1234	return channels;
1235
1236err_reg:
1237	return -1;
1238
1239}
1240
1241static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1242{
1243	int *dbam_map;
1244
1245	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1246		dbam_map = ddr3_dbam;
1247	else
1248		dbam_map = ddr2_dbam;
1249
1250	return dbam_map[cs_mode];
1251}
1252
1253/* Enable extended configuration access via 0xCF8 feature */
1254static void amd64_setup(struct amd64_pvt *pvt)
1255{
1256	u32 reg;
1257
1258	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1259
1260	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
1261	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1262	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1263}
1264
1265/* Restore the extended configuration access via 0xCF8 feature */
1266static void amd64_teardown(struct amd64_pvt *pvt)
1267{
1268	u32 reg;
1269
1270	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1271
1272	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1273	if (pvt->flags.cf8_extcfg)
1274		reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1275	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1276}
1277
1278static u64 f10_get_error_address(struct mem_ctl_info *mci,
1279			struct err_regs *info)
1280{
1281	return (((u64) (info->nbeah & 0xffff)) << 32) +
1282			(info->nbeal & ~0x01);
1283}
1284
1285/*
1286 * Read the Base and Limit registers for F10 based Memory controllers. Extract
1287 * fields from the 'raw' reg into separate data fields.
1288 *
1289 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
1290 */
1291static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1292{
1293	u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
1294
1295	low_offset = K8_DRAM_BASE_LOW + (dram << 3);
1296	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1297
1298	/* read the 'raw' DRAM BASE Address register */
1299	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
1300
1301	/* Read from the ECS data register */
1302	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
1303
1304	/* Extract parts into separate data entries */
1305	pvt->dram_rw_en[dram] = (low_base & 0x3);
1306
1307	if (pvt->dram_rw_en[dram] == 0)
1308		return;
1309
1310	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1311
1312	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1313			       (((u64)low_base  & 0xFFFF0000) << 8);
1314
1315	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1316	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1317
1318	/* read the 'raw' LIMIT registers */
1319	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
1320
1321	/* Read from the ECS data register for the HIGH portion */
1322	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
1323
1324	pvt->dram_DstNode[dram] = (low_limit & 0x7);
1325	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1326
1327	/*
1328	 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1329	 * memory location of the region, so low 24 bits need to be all ones.
1330	 */
1331	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
1332				(((u64) low_limit & 0xFFFF0000) << 8) |
1333				0x00FFFFFF;
1334}
1335
1336static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1337{
1338
1339	if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
1340				&pvt->dram_ctl_select_low)) {
1341		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
1342			"High range addresses at: 0x%x\n",
1343			pvt->dram_ctl_select_low,
1344			dct_sel_baseaddr(pvt));
1345
1346		debugf0("  DCT mode: %s, All DCTs on: %s\n",
1347			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
1348			(dct_dram_enabled(pvt) ? "yes"   : "no"));
1349
1350		if (!dct_ganging_enabled(pvt))
1351			debugf0("  Address range split per DCT: %s\n",
1352				(dct_high_range_enabled(pvt) ? "yes" : "no"));
1353
1354		debugf0("  DCT data interleave for ECC: %s, "
1355			"DRAM cleared since last warm reset: %s\n",
1356			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1357			(dct_memory_cleared(pvt) ? "yes" : "no"));
1358
1359		debugf0("  DCT channel interleave: %s, "
1360			"DCT interleave bits selector: 0x%x\n",
1361			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1362			dct_sel_interleave_addr(pvt));
1363	}
1364
1365	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
1366			   &pvt->dram_ctl_select_high);
1367}
1368
1369/*
1370 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1371 * Interleaving Modes.
1372 */
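/*
 * E.g. with unganged DCTs, channel interleaving enabled and
 * F2x110[DctSelIntLvAddr] == 0, the channel is simply SysAddr bit 6, so
 * consecutive 64-byte lines alternate between DCT0 and DCT1.
 */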
1373static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1374				int hi_range_sel, u32 intlv_en)
1375{
1376	u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
1377
1378	if (dct_ganging_enabled(pvt))
1379		cs = 0;
1380	else if (hi_range_sel)
1381		cs = dct_sel_high;
1382	else if (dct_interleave_enabled(pvt)) {
1383		/*
1384		 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1385		 */
1386		if (dct_sel_interleave_addr(pvt) == 0)
1387			cs = sys_addr >> 6 & 1;
1388		else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1389			temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1390
1391			if (dct_sel_interleave_addr(pvt) & 1)
1392				cs = (sys_addr >> 9 & 1) ^ temp;
1393			else
1394				cs = (sys_addr >> 6 & 1) ^ temp;
1395		} else if (intlv_en & 4)
1396			cs = sys_addr >> 15 & 1;
1397		else if (intlv_en & 2)
1398			cs = sys_addr >> 14 & 1;
1399		else if (intlv_en & 1)
1400			cs = sys_addr >> 13 & 1;
1401		else
1402			cs = sys_addr >> 12 & 1;
1403	} else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1404		cs = ~dct_sel_high & 1;
1405	else
1406		cs = 0;
1407
1408	return cs;
1409}
1410
1411static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
1412{
1413	if (intlv_en == 1)
1414		return 1;
1415	else if (intlv_en == 3)
1416		return 2;
1417	else if (intlv_en == 7)
1418		return 3;
1419
1420	return 0;
1421}
1422
1423/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
1424static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
1425						 u32 dct_sel_base_addr,
1426						 u64 dct_sel_base_off,
1427						 u32 hole_valid, u32 hole_off,
1428						 u64 dram_base)
1429{
1430	u64 chan_off;
1431
1432	if (hi_range_sel) {
1433		if (!(dct_sel_base_addr & 0xFFFF0000) &&
1434		   hole_valid && (sys_addr >= 0x100000000ULL))
1435			chan_off = hole_off << 16;
1436		else
1437			chan_off = dct_sel_base_off;
1438	} else {
1439		if (hole_valid && (sys_addr >= 0x100000000ULL))
1440			chan_off = hole_off << 16;
1441		else
1442			chan_off = dram_base & 0xFFFFF8000000ULL;
1443	}
1444
1445	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
1446			(chan_off & 0x0000FFFFFF800000ULL);
1447}
1448
1449/* Hack for the time being - Can we get this from BIOS?? */
1450#define	CH0SPARE_RANK	0
1451#define	CH1SPARE_RANK	1
1452
1453/*
 1454 * Check whether the csrow passed in is marked as SPARED; if so, return the
 1455 * new spare row.
1456 */
1457static inline int f10_process_possible_spare(int csrow,
1458				u32 cs, struct amd64_pvt *pvt)
1459{
1460	u32 swap_done;
1461	u32 bad_dram_cs;
1462
1463	/* Depending on channel, isolate respective SPARING info */
1464	if (cs) {
1465		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1466		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1467		if (swap_done && (csrow == bad_dram_cs))
1468			csrow = CH1SPARE_RANK;
1469	} else {
1470		swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1471		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1472		if (swap_done && (csrow == bad_dram_cs))
1473			csrow = CH0SPARE_RANK;
1474	}
1475	return csrow;
1476}
1477
1478/*
1479 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1480 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1481 *
1482 * Return:
1483 *	-EINVAL:  NOT FOUND
1484 *	0..csrow = Chip-Select Row
1485 */
1486static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1487{
1488	struct mem_ctl_info *mci;
1489	struct amd64_pvt *pvt;
1490	u32 cs_base, cs_mask;
1491	int cs_found = -EINVAL;
1492	int csrow;
1493
1494	mci = mci_lookup[nid];
1495	if (!mci)
1496		return cs_found;
1497
1498	pvt = mci->pvt_info;
1499
1500	debugf1("InputAddr=0x%x  channelselect=%d\n", in_addr, cs);
1501
1502	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1503
1504		cs_base = amd64_get_dct_base(pvt, cs, csrow);
1505		if (!(cs_base & K8_DCSB_CS_ENABLE))
1506			continue;
1507
1508		/*
1509		 * We have an ENABLED CSROW, Isolate just the MASK bits of the
1510		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
1511		 * of the actual address.
1512		 */
1513		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
1514
1515		/*
 1516		 * Get the DCT Mask, and ENABLE the reserved bits: [18:14] and
 1517		 * [4:0] to become ON. Then keep only bits [28:0] ([36:8]).
1518		 */
1519		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1520
1521		debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
1522				csrow, cs_base, cs_mask);
1523
1524		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
1525
1526		debugf1("              Final CSMask=0x%x\n", cs_mask);
1527		debugf1("    (InputAddr & ~CSMask)=0x%x "
1528				"(CSBase & ~CSMask)=0x%x\n",
1529				(in_addr & ~cs_mask), (cs_base & ~cs_mask));
1530
1531		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
1532			cs_found = f10_process_possible_spare(csrow, cs, pvt);
1533
1534			debugf1(" MATCH csrow=%d\n", cs_found);
1535			break;
1536		}
1537	}
1538	return cs_found;
1539}
1540
1541/* For a given @dram_range, check if @sys_addr falls within it. */
1542static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1543				  u64 sys_addr, int *nid, int *chan_sel)
1544{
1545	int node_id, cs_found = -EINVAL, high_range = 0;
1546	u32 intlv_en, intlv_sel, intlv_shift, hole_off;
1547	u32 hole_valid, tmp, dct_sel_base, channel;
1548	u64 dram_base, chan_addr, dct_sel_base_off;
1549
1550	dram_base = pvt->dram_base[dram_range];
1551	intlv_en = pvt->dram_IntlvEn[dram_range];
1552
1553	node_id = pvt->dram_DstNode[dram_range];
1554	intlv_sel = pvt->dram_IntlvSel[dram_range];
1555
1556	debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
1557		dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
1558
1559	/*
1560	 * This assumes that one node's DHAR is the same as all the other
1561	 * nodes' DHAR.
1562	 */
1563	hole_off = (pvt->dhar & 0x0000FF80);
1564	hole_valid = (pvt->dhar & 0x1);
1565	dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1566
1567	debugf1("   HoleOffset=0x%x  HoleValid=0x%x IntlvSel=0x%x\n",
1568			hole_off, hole_valid, intlv_sel);
1569
 1570	if (intlv_en &&
1571	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1572		return -EINVAL;
1573
1574	dct_sel_base = dct_sel_baseaddr(pvt);
1575
1576	/*
1577	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1578	 * select between DCT0 and DCT1.
1579	 */
1580	if (dct_high_range_enabled(pvt) &&
1581	   !dct_ganging_enabled(pvt) &&
1582	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1583		high_range = 1;
1584
1585	channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1586
1587	chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
1588					     dct_sel_base_off, hole_valid,
1589					     hole_off, dram_base);
1590
1591	intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
1592
1593	/* remove Node ID (in case of memory interleaving) */
1594	tmp = chan_addr & 0xFC0;
1595
1596	chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
1597
1598	/* remove channel interleave and hash */
1599	if (dct_interleave_enabled(pvt) &&
1600	   !dct_high_range_enabled(pvt) &&
1601	   !dct_ganging_enabled(pvt)) {
1602		if (dct_sel_interleave_addr(pvt) != 1)
1603			chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
1604		else {
1605			tmp = chan_addr & 0xFC0;
1606			chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
1607					| tmp;
1608		}
1609	}
1610
1611	debugf1("   (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
1612		chan_addr, (u32)(chan_addr >> 8));
1613
1614	cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
1615
1616	if (cs_found >= 0) {
1617		*nid = node_id;
1618		*chan_sel = channel;
1619	}
1620	return cs_found;
1621}
1622
1623static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1624				       int *node, int *chan_sel)
1625{
1626	int dram_range, cs_found = -EINVAL;
1627	u64 dram_base, dram_limit;
1628
1629	for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
1630
1631		if (!pvt->dram_rw_en[dram_range])
1632			continue;
1633
1634		dram_base = pvt->dram_base[dram_range];
1635		dram_limit = pvt->dram_limit[dram_range];
1636
1637		if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
1638
1639			cs_found = f10_match_to_this_node(pvt, dram_range,
1640							  sys_addr, node,
1641							  chan_sel);
1642			if (cs_found >= 0)
1643				break;
1644		}
1645	}
1646	return cs_found;
1647}
1648
1649/*
1650 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1651 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1652 *
1653 * The @sys_addr is usually an error address received from the hardware
1654 * (MCX_ADDR).
1655 */
1656static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1657				     struct err_regs *info,
1658				     u64 sys_addr)
1659{
1660	struct amd64_pvt *pvt = mci->pvt_info;
1661	u32 page, offset;
1662	unsigned short syndrome;
1663	int nid, csrow, chan = 0;
1664
1665	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1666
1667	if (csrow < 0) {
1668		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1669		return;
1670	}
1671
1672	error_address_to_page_and_offset(sys_addr, &page, &offset);
1673
1674	syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
1675	syndrome |= LOW_SYNDROME(info->nbsh);
1676
1677	/*
1678	 * We need the syndromes for channel detection only when we're
1679	 * ganged. Otherwise @chan should already contain the channel at
1680	 * this point.
1681	 */
1682	if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
1683		chan = get_channel_from_ecc_syndrome(mci, syndrome);
1684
1685	if (chan >= 0)
1686		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
1687				  EDAC_MOD_STR);
1688	else
1689		/*
1690		 * Channel unknown, report all channels on this CSROW as failed.
1691		 */
1692		for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
1693			edac_mc_handle_ce(mci, page, offset, syndrome,
1694					  csrow, chan, EDAC_MOD_STR);
1695}
1696
1697/*
 1698 * debug routine to display the memory sizes of all logical DIMMs and their
1699 * CSROWs as well
1700 */
1701static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1702{
1703	int dimm, size0, size1, factor = 0;
1704	u32 dbam;
1705	u32 *dcsb;
1706
1707	if (boot_cpu_data.x86 == 0xf) {
1708		if (pvt->dclr0 & F10_WIDTH_128)
1709			factor = 1;
1710
1711		/* K8 families < revF not supported yet */
1712	       if (pvt->ext_model < K8_REV_F)
1713			return;
1714	       else
1715		       WARN_ON(ctrl != 0);
1716	}
1717
1718	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1719		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
1720
1721	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1722	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1723
1724	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1725
1726	/* Dump memory sizes for DIMM and its CSROWs */
1727	for (dimm = 0; dimm < 4; dimm++) {
1728
1729		size0 = 0;
1730		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
1731			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1732
1733		size1 = 0;
1734		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
1735			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1736
1737		edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
1738			    dimm * 2,     size0 << factor,
1739			    dimm * 2 + 1, size1 << factor);
1740	}
1741}
1742
1743/*
1744 * There currently are 3 typ…
