#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/div64.h>

static int use_pci_fixup;
MODULE_PARM_DESC(use_pci_fixup,
                 "Enable PCI fixup to seek for hidden devices");

#define MAX_SOCKET_BUSES        2

#define I7CORE_REVISION         " Ver: 1.0.0"
#define EDAC_MOD_STR            "i7core_edac"

#define i7core_printk(level, fmt, arg...)                       \
        edac_printk(level, "i7core", fmt, ##arg)

#define i7core_mc_printk(mci, level, fmt, arg...)               \
        edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
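/*
 * Memory controller registers: the register defines below are offsets into
 * the PCI configuration space of the uncore devices claimed by this driver,
 * accessed with pci_read_config_dword()/pci_write_config_dword(); the macros
 * next to them decode the relevant bit fields.
 */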
#define MC_CFG_CONTROL          0x90
#define MC_CFG_UNLOCK           0x02
#define MC_CFG_LOCK             0x00

#define MC_CONTROL              0x48
#define MC_STATUS               0x4c
#define MC_MAX_DOD              0x64

#define MC_TEST_ERR_RCV1        0x60
#define DIMM2_COR_ERR(r)        ((r) & 0x7fff)

#define MC_TEST_ERR_RCV0        0x64
#define DIMM1_COR_ERR(r)        (((r) >> 16) & 0x7fff)
#define DIMM0_COR_ERR(r)        ((r) & 0x7fff)
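/*
 * MC_TEST_ERR_RCV1/RCV0 hold the running corrected-error counts for the three
 * DIMMs of a channel; the DIMM0/1/2_COR_ERR() macros extract the 15-bit
 * counter for each DIMM.
 */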
#define MC_SSRCONTROL           0x48
#define SSR_MODE_DISABLE        0x00
#define SSR_MODE_ENABLE         0x01
#define SSR_MODE_MASK           0x03

#define MC_SCRUB_CONTROL        0x4c
#define STARTSCRUB              (1 << 24)
#define SCRUBINTERVAL_MASK      0xffffff
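/*
 * MC_COR_ECC_CNT_0..5 are the corrected-ECC counters used for registered
 * DIMMs: each 32-bit register packs two 15-bit counts, extracted with
 * DIMM_TOP_COR_ERR() (upper half) and DIMM_BOT_COR_ERR() (lower half).
 */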
#define MC_COR_ECC_CNT_0        0x80
#define MC_COR_ECC_CNT_1        0x84
#define MC_COR_ECC_CNT_2        0x88
#define MC_COR_ECC_CNT_3        0x8c
#define MC_COR_ECC_CNT_4        0x90
#define MC_COR_ECC_CNT_5        0x94

#define DIMM_TOP_COR_ERR(r)     (((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r)     ((r) & 0x7fff)

#define MC_CHANNEL_DIMM_INIT_PARAMS     0x58
#define THREE_DIMMS_PRESENT             (1 << 24)
#define SINGLE_QUAD_RANK_PRESENT        (1 << 23)
#define QUAD_RANK_PRESENT               (1 << 22)
#define REGISTERED_DIMM                 (1 << 15)
#define MC_CHANNEL_MAPPER       0x60
#define RDLCH(r, ch)            ((((r) >> (3 + ((ch) * 6))) & 0x07) - 1)
#define WRLCH(r, ch)            ((((r) >> ((ch) * 6)) & 0x07) - 1)
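/*
 * RDLCH()/WRLCH() return the logical channel that physical channel "ch" maps
 * to for reads and writes; the raw field is stored offset by one, hence the
 * trailing "- 1".
 */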
#define MC_CHANNEL_RANK_PRESENT 0x7c
#define RANK_PRESENT_MASK       0xffff

#define MC_CHANNEL_ADDR_MATCH   0xf0
#define MC_CHANNEL_ERROR_MASK   0xf8
#define MC_CHANNEL_ERROR_INJECT 0xfc
#define INJECT_ADDR_PARITY      0x10
#define INJECT_ECC              0x08
#define MASK_CACHELINE          0x06
#define MASK_FULL_CACHELINE     0x06
#define MASK_MSB32_CACHELINE    0x04
#define MASK_LSB32_CACHELINE    0x02
#define NO_MASK_CACHELINE       0x00
#define REPEAT_EN               0x01
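/*
 * The per-channel MC_DOD_CH_DIMM0..2 registers describe each DIMM slot:
 * bit 9 flags presence, and the NUMBANK/NUMRANK/NUMROW/NUMCOL fields are
 * decoded by the numbank()/numrank()/numrow()/numcol() helpers further down.
 */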
#define MC_DOD_CH_DIMM0         0x48
#define MC_DOD_CH_DIMM1         0x4c
#define MC_DOD_CH_DIMM2         0x50
#define RANKOFFSET_MASK         ((1 << 12) | (1 << 11) | (1 << 10))
#define RANKOFFSET(x)           (((x) & RANKOFFSET_MASK) >> 10)
#define DIMM_PRESENT_MASK       (1 << 9)
#define DIMM_PRESENT(x)         (((x) & DIMM_PRESENT_MASK) >> 9)
#define MC_DOD_NUMBANK_MASK     ((1 << 8) | (1 << 7))
#define MC_DOD_NUMBANK(x)       (((x) & MC_DOD_NUMBANK_MASK) >> 7)
#define MC_DOD_NUMRANK_MASK     ((1 << 6) | (1 << 5))
#define MC_DOD_NUMRANK(x)       (((x) & MC_DOD_NUMRANK_MASK) >> 5)
#define MC_DOD_NUMROW_MASK      ((1 << 4) | (1 << 3) | (1 << 2))
#define MC_DOD_NUMROW(x)        (((x) & MC_DOD_NUMROW_MASK) >> 2)
#define MC_DOD_NUMCOL_MASK      3
#define MC_DOD_NUMCOL(x)        ((x) & MC_DOD_NUMCOL_MASK)
#define MC_RANK_PRESENT         0x7c

#define MC_SAG_CH_0     0x80
#define MC_SAG_CH_1     0x84
#define MC_SAG_CH_2     0x88
#define MC_SAG_CH_3     0x8c
#define MC_SAG_CH_4     0x90
#define MC_SAG_CH_5     0x94
#define MC_SAG_CH_6     0x98
#define MC_SAG_CH_7     0x9c

#define MC_RIR_LIMIT_CH_0       0x40
#define MC_RIR_LIMIT_CH_1       0x44
#define MC_RIR_LIMIT_CH_2       0x48
#define MC_RIR_LIMIT_CH_3       0x4C
#define MC_RIR_LIMIT_CH_4       0x50
#define MC_RIR_LIMIT_CH_5       0x54
#define MC_RIR_LIMIT_CH_6       0x58
#define MC_RIR_LIMIT_CH_7       0x5C
#define MC_RIR_LIMIT_MASK       ((1 << 10) - 1)

#define MC_RIR_WAY_CH           0x80
#define MC_RIR_WAY_OFFSET_MASK  (((1 << 14) - 1) & ~0x7)
#define MC_RIR_WAY_RANK_MASK    0x7

#define MAX_MCR_FUNC    4
#define MAX_CHAN_FUNC   3
#define PCI_DESCR(device, function, device_id)  \
        .dev = (device),                        \
        .func = (function),                     \
        .dev_id = (device_id)
static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {

static const struct pci_id_descr pci_dev_descr_lynnfield[] = {

static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {

#define PCI_ID_TABLE_ENTRY(A) { .descr = A, .n_devs = ARRAY_SIZE(A) }
#define CH_ACTIVE(pvt, ch)      ((pvt)->info.mc_control & (1 << (8 + (ch))))
#define ECCx8(pvt)              ((pvt)->info.mc_control & (1 << 1))

#define ECC_ENABLED(pvt)        ((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)    ((pvt)->info.mc_status & (1 << (ch)))
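/* Decode the geometry fields packed into MC_MAX_DOD / MC_DOD_CH_DIMMx */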
static inline int numdimms(u32 dimms)
{
        return (dimms & 0x3) + 1;
}

static inline int numrank(u32 rank)
{
        static int ranks[4] = { 1, 2, 4, -EINVAL };

        return ranks[rank & 0x3];
}
static inline int numbank(u32 bank)
{
        static int banks[4] = { 4, 8, 16, -EINVAL };

        return banks[bank & 0x3];
}

static inline int numrow(u32 row)
{
        static int rows[8] = {
                1 << 12, 1 << 13, 1 << 14, 1 << 15,
                1 << 16, -EINVAL, -EINVAL, -EINVAL,
        };

        return rows[row & 0x7];
}
static inline int numcol(u32 col)
{
        static int cols[8] = {
                1 << 10, 1 << 11, 1 << 12, -EINVAL,
        };
        return cols[col & 0x3];
}
                if (i7core_dev->socket == socket)
static struct i7core_dev *alloc_i7core_dev(u8 socket,
                                           const struct pci_id_table *table)
{
        struct i7core_dev *i7core_dev;

        i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);

        i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
                                   GFP_KERNEL);
        if (!i7core_dev->pdev) {
static void free_i7core_dev(struct i7core_dev *i7core_dev)

        pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
        edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
                 pvt->i7core_dev->socket, pvt->info.mc_control,
                 pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
        edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);

        edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
                 numdimms(pvt->info.max_dod),
                 numrank(pvt->info.max_dod >> 2),
                 numbank(pvt->info.max_dod >> 4),
                 numrow(pvt->info.max_dod >> 6),
                 numcol(pvt->info.max_dod >> 9));

                        edac_dbg(0, "Channel %i is not active\n", i);

                        edac_dbg(0, "Channel %i is disabled\n", i);
                pci_read_config_dword(pvt->pci_ch[i][0],
                                      MC_CHANNEL_DIMM_INIT_PARAMS, &data);
                        pvt->channel[i].is_3dimms_present = true;

                        pvt->channel[i].is_single_4rank = true;
                pci_read_config_dword(pvt->pci_ch[i][1],
                                      MC_DOD_CH_DIMM0, &dimm_dod[0]);
                pci_read_config_dword(pvt->pci_ch[i][1],
                                      MC_DOD_CH_DIMM1, &dimm_dod[1]);
                pci_read_config_dword(pvt->pci_ch[i][1],
                                      MC_DOD_CH_DIMM2, &dimm_dod[2]);
                edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
                         i,
                         RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
                         data,
                         pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
                         pvt->channel[i].is_single_4rank ? "SINGLE_4R " : "",
                         pvt->channel[i].has_4rank ? "HAS_4R " : "",
                         (data & REGISTERED_DIMM) ? 'R' : 'U');
                for (j = 0; j < 3; j++) {

                        size = (rows * cols * banks * ranks) >> (20 - 3);
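                        /*
                         * Size in MiB: each row/col/bank/rank combination
                         * covers 8 bytes on the 64-bit DDR3 data bus, hence
                         * the ">> (20 - 3)".
                         */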
                        edac_dbg(0, "\tdimm %d %d MB offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
                                 j, size,
                                 RANKOFFSET(dimm_dod[j]),
                                 banks, ranks, rows, cols);

                                 "CPU#%uChannel#%u_DIMM#%u",
        pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
        pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
        pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
        pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
        pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
        pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
        pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
        pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
        edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
        for (j = 0; j < 8; j++)
                edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
                         (value[j] >> 27) & 0x1,
                         (value[j] >> 24) & 0x7,
                         (value[j] & ((1 << 24) - 1)));
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static int disable_inject(const struct mem_ctl_info *mci)

        pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
                               MC_CHANNEL_ERROR_INJECT, 0);
                                           const char *data, size_t count)

        if ((rc < 0) || (value > 3))

static ssize_t i7core_inject_section_show(struct device *dev,

                                        const char *data, size_t count)

        if ((rc < 0) || (value > 7))
static ssize_t i7core_inject_eccmask_store(struct device *dev,
                                           struct device_attribute *mattr,
                                           const char *data, size_t count)
static ssize_t i7core_inject_eccmask_show(struct device *dev,
#define DECLARE_ADDR_MATCH(param, limit)                                \
static ssize_t i7core_inject_store_##param(                             \
        struct device *dev,                                             \
        struct device_attribute *mattr,                                 \
        const char *data, size_t count)                                 \
        struct mem_ctl_info *mci = dev_get_drvdata(dev);                \
        struct i7core_pvt *pvt;                                         \
        pvt = mci->pvt_info;                                            \
        if (pvt->inject.enable)                                         \
                disable_inject(mci);                                    \
        if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))     \
        rc = strict_strtoul(data, 10, &value);                          \
        if ((rc < 0) || (value >= limit))                               \
        pvt->inject.param = value;                                      \
static ssize_t i7core_inject_show_##param(                              \
        struct device *dev,                                             \
        struct device_attribute *mattr,                                 \
        struct mem_ctl_info *mci = dev_get_drvdata(dev);                \
        struct i7core_pvt *pvt;                                         \
        pvt = mci->pvt_info;                                            \
        edac_dbg(1, "pvt=%p\n", pvt);                                   \
        if (pvt->inject.param < 0)                                      \
                return sprintf(data, "any\n");                          \
        return sprintf(data, "%d\n", pvt->inject.param);
#define ATTR_ADDR_MATCH(param)                          \
        static DEVICE_ATTR(param, S_IRUGO | S_IWUSR,    \
                   i7core_inject_show_##param,          \
                   i7core_inject_store_##param)
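/*
 * The ATTR_ADDR_MATCH() attributes end up under the per-MC
 * "inject_addrmatch" sysfs device, so (paths illustrative) an error can be
 * steered with something like:
 *      echo 0   > .../inject_addrmatch/channel
 *      echo any > .../inject_addrmatch/dimm
 * "any" is stored as -1, which the show side reports back as "any".
 */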
static int write_and_test(struct pci_dev *dev, const int where, const u32 val)

        edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",

        for (count = 0; count < 10; count++) {

                pci_write_config_dword(dev, where, val);
                pci_read_config_dword(dev, where, &read);

                           "write=%08x. Read=%08x\n",
static ssize_t i7core_inject_enable_store(struct device *dev,
                                          struct device_attribute *mattr,
                                          const char *data, size_t count)
                mask |= (pvt->inject.dimm & 0x3LL) << 35;

                mask |= (pvt->inject.dimm & 0x1LL) << 36;

                mask |= (pvt->inject.rank & 0x1LL) << 34;

                mask |= (pvt->inject.rank & 0x3LL) << 34;

                mask |= (pvt->inject.bank & 0x15LL) << 30;

                mask |= (pvt->inject.page & 0xffff) << 14;

                mask |= (pvt->inject.col & 0x3fff);
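        /*
         * Assemble the MC_CHANNEL_ERROR_INJECT value: bit 0 is REPEAT_EN
         * (taken from the low bit of inject.type), bits 2:1 select the
         * cache-line section mask, and bits 4:3 carry INJECT_ECC /
         * INJECT_ADDR_PARITY from the upper bits of inject.type.
         */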
        injectmask = (pvt->inject.type & 1) |
                     (pvt->inject.section & 0x3) << 1 |
                     (pvt->inject.type & 0x6) << (3 - 1);

        edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
                 mask, pvt->inject.eccmask, injectmask);
static ssize_t i7core_inject_enable_show(struct device *dev,

        pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
                              MC_CHANNEL_ERROR_INJECT, &injectmask);

        edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);
        if (injectmask & 0x0c)

#define DECLARE_COUNTER(param)                                  \
static ssize_t i7core_show_counter_##param(                     \
        struct device *dev,                                     \
        struct device_attribute *mattr,                         \
        struct mem_ctl_info *mci = dev_get_drvdata(dev);        \
        struct i7core_pvt *pvt = mci->pvt_info;                 \
        edac_dbg(1, "\n");                                      \
        if (!pvt->ce_count_available || (pvt->is_registered))   \
                return sprintf(data, "data unavailable\n");     \
        return sprintf(data, "%lu\n",                           \
                       pvt->udimm_ce_count[param]);

#define ATTR_COUNTER(param)                                     \
        static DEVICE_ATTR(udimm##param, S_IRUGO | S_IWUSR,     \
                   i7core_show_counter_##param,
static struct attribute *i7core_addrmatch_attrs[] = {
        &dev_attr_channel.attr,
        &dev_attr_dimm.attr,
        &dev_attr_rank.attr,
        &dev_attr_bank.attr,
        &dev_attr_page.attr,

        .attrs  = i7core_addrmatch_attrs,

        edac_dbg(1, "Releasing device %s\n", dev_name(device));

        .groups         = addrmatch_groups,
        .release        = addrmatch_release,

static struct attribute *i7core_udimm_counters_attrs[] = {
        &dev_attr_udimm0.attr,
        &dev_attr_udimm1.attr,
        &dev_attr_udimm2.attr,

        .attrs  = i7core_udimm_counters_attrs,

        &all_channel_counts_grp,

static void all_channel_counts_release(struct device *device)
        edac_dbg(1, "Releasing device %s\n", dev_name(device));

static struct device_type all_channel_counts_type = {
        .groups         = all_channel_counts_groups,
        .release        = all_channel_counts_release,

                   i7core_inject_section_show, i7core_inject_section_store);
                   i7core_inject_type_show, i7core_inject_type_store);
                   i7core_inject_eccmask_show, i7core_inject_eccmask_store);
                   i7core_inject_enable_show, i7core_inject_enable_store);
static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)

static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)

static void i7core_put_devices(struct i7core_dev *i7core_dev)

        for (i = 0; i < i7core_dev->n_devs; i++) {

                edac_dbg(0, "Removing dev %02x:%02x.%d\n",

static void i7core_put_all_devices(void)
        struct i7core_dev *i7core_dev, *tmp;

                i7core_put_devices(i7core_dev);
                free_i7core_dev(i7core_dev);

        while (table && table->descr) {
static unsigned i7core_pci_lastbus(void)
        int last_bus = 0, bus;

        edac_dbg(0, "Last bus %d\n", last_bus);
static int i7core_get_onedevice(struct pci_dev **prev,
                                const struct pci_id_table *table,
                                const unsigned devno,
                                const unsigned last_bus)
{
        struct i7core_dev *i7core_dev;

        pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
                              dev_descr->dev_id, *prev);
1359 "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
1360 dev_descr->
dev, dev_descr->
func,
1366 bus = pdev->
bus->number;
1368 socket = last_bus -
bus;
1370 i7core_dev = get_i7core_dev(socket);
1372 i7core_dev = alloc_i7core_dev(socket, table);
1379 if (i7core_dev->
pdev[devno]) {
1381 "Duplicated device for "
1382 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1383 bus, dev_descr->
dev, dev_descr->
func,
1395 "Device PCI ID %04x:%04x "
1396 "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
1399 bus, dev_descr->
dev, dev_descr->
func);
1407 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1408 bus, dev_descr->
dev, dev_descr->
func,
1413 edac_dbg(0,
"Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
1414 socket, bus, dev_descr->
dev,
static int i7core_get_all_devices(void)
        int i, rc, last_bus;

        last_bus = i7core_pci_lastbus();
        while (table && table->descr) {
                for (i = 0; i < table->n_devs; i++) {

                                rc = i7core_get_onedevice(&pdev, table, i,
                                                          last_bus);

                        i7core_put_all_devices();
static int mci_bind_devs(struct mem_ctl_info *mci,
                         struct i7core_dev *i7core_dev)

        for (i = 0; i < i7core_dev->n_devs; i++) {
                pdev = i7core_dev->pdev[i];
                } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {

                } else if (!slot && !func) {

                        family = "Xeon 35xx/ i7core";

                        family = "i7-800/i5-700";

                        family = "Xeon 34xx";

                        family = "Xeon 55xx";

                        family = "Xeon 56xx / i7-900";

                edac_dbg(0, "Detected a processor type %s\n", family);
                edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n",
                         PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
                         pdev, i7core_dev->socket);
1531 "is out of the expected range\n",
1540 static void i7core_rdimm_update_ce_count(
struct mem_ctl_info *mci,
1547 int add0 = 0, add1 = 0, add2 = 0;
1579 chan, 0, -1,
"error",
"");
1583 chan, 1, -1,
"error",
"");
1587 chan, 2, -1,
"error",
"");
1590 static void i7core_rdimm_check_mc_ecc_err(
struct mem_ctl_info *mci)
1594 int i, new0, new1, new2;
1609 for (i = 0 ; i < 3; i++) {
1610 edac_dbg(3,
"MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
1611 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
                if (pvt->channel[i].dimms > 2) {

                i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);

static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)

        int new0, new1, new2;

                edac_dbg(0, "MCR registers not found\n");
                int add0, add1, add2;

                if (add0 | add1 | add2)
                                      "dimm0: +%d, dimm1: +%d, dimm2: +%d\n",
static void i7core_mce_output_error(struct mem_ctl_info *mci,
                                    const struct mce *m)
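        /*
         * MCi_STATUS bit 61 is the UC (uncorrected) flag; the operation type
         * lives in bits 6:4 and the corrected error count in bits 52:38,
         * while MCi_MISC bits 17:16 identify the DIMM.
         */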
        bool uncorrected_error = m->status & 1ll << 61;

        u32 optypenum = (m->status >> 4) & 0x07;
        u32 core_err_cnt = (m->status >> 38) & 0x7fff;
        u32 dimm = (m->misc >> 16) & 0x3;
        if (uncorrected_error) {

        switch (optypenum) {

                optype = "generic undef request";

                optype = "read error";

                optype = "write error";

                optype = "addr/cmd error";

                optype = "scrubbing error";

                optype = "reserved";
                err = "read ECC error";

                err = "RAS ECC error";

                err = "write parity error";

                err = "redundancy loss";

                err = "memory range error";

                err = "RTID out of range";

                err = "address parity error";

                err = "byte enable parity error";
static void i7core_check_error(struct mem_ctl_info *mci)

                goto check_ce_error;

        for (i = 0; i < count; i++)

                i7core_udimm_check_mc_ecc_err(mci);

                i7core_rdimm_check_mc_ecc_err(mci);
static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
                                  void *data)
{
        struct mce *mce = (struct mce *)data;
        struct i7core_dev *i7_dev;

        i7_dev = get_i7core_dev(mce->socketid);

        if (((mce->status & 0xffff) >> 7) != 1)

        i7core_check_error(mci);
        .notifier_call  = i7core_mce_check_error,

static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
        int *dclk_freq = _dclk_freq;

        if (*dclk_freq == -1)

                        (struct memdev_dmi_entry *)dh;
                unsigned long conf_mem_clk_speed_offset =
                        (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
                        (unsigned long)&memdev_dmi_entry->type;
                unsigned long speed_offset =
                        (unsigned long)&memdev_dmi_entry->speed -
                        (unsigned long)&memdev_dmi_entry->type;
                if (memdev_dmi_entry->size == 0)

                if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {

                } else if (memdev_dmi_entry->length > speed_offset) {
                        dmi_mem_clk_speed = memdev_dmi_entry->speed;

                if (*dclk_freq == 0) {

                        if (dmi_mem_clk_speed > 0) {

                                *dclk_freq = dmi_mem_clk_speed;

                } else if (*dclk_freq > 0 &&
                           *dclk_freq != dmi_mem_clk_speed) {
#define DEFAULT_DCLK_FREQ 800

static int get_dclk_freq(void)

        dmi_walk(decode_dclk, (void *)&dclk_freq);
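        /*
         * decode_dclk() leaves dclk_freq at 0 when no DIMM reported a speed
         * and at -1 when the reported speeds are inconsistent; anything <= 0
         * ends up replaced by DEFAULT_DCLK_FREQ.
         */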
        unsigned long long scrub_interval;
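        /*
         * SCRUBINTERVAL holds the number of DCLK cycles between two scrubbed
         * cache lines, so the requested bandwidth new_bw (bytes/s) converts
         * to freq_dclk_mhz * 10^6 * cache_line_size / new_bw cycles.
         */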
        scrub_interval = (unsigned long long)freq_dclk_mhz *
                         cache_line_size * 1000000;
        do_div(scrub_interval, new_bw);
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)

        unsigned long long scrub_rate;
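        /*
         * Inverse of the calculation above: read the SCRUBINTERVAL field
         * (DCLK cycles per scrubbed cache line) back and convert it to a
         * bandwidth in bytes/s.
         */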
        scrub_rate = (unsigned long long)freq_dclk_mhz *
                     1000000 * cache_line_size;
        do_div(scrub_rate, scrubval);
        return (int)scrub_rate;
static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)

static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)

static void i7core_pci_ctl_create(struct i7core_pvt *pvt)

                        "Unable to setup PCI error report via EDAC\n");

static void i7core_pci_ctl_release(struct i7core_pvt *pvt)

                        "Couldn't find mem_ctl_info for socket %d\n",

static void i7core_unregister_mci(struct i7core_dev *i7core_dev)

                edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev);

        edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);

        disable_sdram_scrub_setting(mci);

        i7core_pci_ctl_release(pvt);

        i7core_delete_sysfs_devices(mci);
static int i7core_register_mci(struct i7core_dev *i7core_dev)

        layers[0].is_virt_csrow = false;

        layers[1].is_virt_csrow = true;

        edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);

        memset(pvt, 0, sizeof(*pvt));

        i7core_dev->mci = mci;

        rc = mci_bind_devs(mci, i7core_dev);

        get_dimm_config(mci);

        mci->pdev = &i7core_dev->pdev[0]->dev;

        enable_sdram_scrub_setting(mci);

                edac_dbg(0, "MC: failed edac_mc_add_mc()\n");

        if (i7core_create_sysfs_devices(mci)) {
                edac_dbg(0, "MC: failed to create sysfs nodes\n");

        i7core_pci_ctl_create(pvt);
        struct i7core_dev *i7core_dev;

        rc = i7core_get_all_devices();

                rc = i7core_register_mci(i7core_dev);

                     "Driver loaded, %d memory controller(s) found.\n",

                i7core_unregister_mci(i7core_dev);

        i7core_put_all_devices();

        struct i7core_dev *i7core_dev;

                i7core_unregister_mci(i7core_dev);

        i7core_put_all_devices();

        .name     = "i7core_edac",
        .probe    = i7core_probe,
        .id_table = i7core_pci_tbl,

static int __init i7core_init(void)

                i7core_xeon_pci_fixup(pci_dev_table);

        pci_rc = pci_register_driver(&i7core_driver);

static void __exit i7core_exit(void)