#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/edac.h>

#include "edac_core.h"
/* device 16, func 1: registers of the memory-controller core */
#define I5100_MC                0x40    /* Memory Control Register */
#define I5100_MC_SCRBEN_MASK    (1 << 7)
#define I5100_MC_SCRBDONE_MASK  (1 << 4)
#define I5100_MS                0x44    /* Memory Status Register */
#define I5100_SPDDATA           0x48    /* Serial Presence Detect Status Reg */
#define I5100_SPDCMD            0x4c    /* Serial Presence Detect Command Reg */
#define I5100_TOLM              0x6c    /* Top of Low Memory */
#define I5100_MIR0              0x80    /* Memory Interleave Range 0 */
#define I5100_MIR1              0x84    /* Memory Interleave Range 1 */
#define I5100_AMIR_0            0x8c    /* Adjusted Memory Interleave Range 0 */
#define I5100_AMIR_1            0x90    /* Adjusted Memory Interleave Range 1 */
#define I5100_FERR_NF_MEM       0xa0    /* MC First Non Fatal Errors */
#define I5100_FERR_NF_MEM_M16ERR_MASK   (1 << 16)
#define I5100_FERR_NF_MEM_M15ERR_MASK   (1 << 15)
#define I5100_FERR_NF_MEM_M14ERR_MASK   (1 << 14)
#define I5100_FERR_NF_MEM_M12ERR_MASK   (1 << 12)
#define I5100_FERR_NF_MEM_M11ERR_MASK   (1 << 11)
#define I5100_FERR_NF_MEM_M10ERR_MASK   (1 << 10)
#define I5100_FERR_NF_MEM_M6ERR_MASK    (1 << 6)
#define I5100_FERR_NF_MEM_M5ERR_MASK    (1 << 5)
#define I5100_FERR_NF_MEM_M4ERR_MASK    (1 << 4)
#define I5100_FERR_NF_MEM_M1ERR_MASK    (1 << 1)
#define I5100_FERR_NF_MEM_ANY_MASK      \
        (I5100_FERR_NF_MEM_M16ERR_MASK | \
         I5100_FERR_NF_MEM_M15ERR_MASK | \
         I5100_FERR_NF_MEM_M14ERR_MASK | \
         I5100_FERR_NF_MEM_M12ERR_MASK | \
         I5100_FERR_NF_MEM_M11ERR_MASK | \
         I5100_FERR_NF_MEM_M10ERR_MASK | \
         I5100_FERR_NF_MEM_M6ERR_MASK | \
         I5100_FERR_NF_MEM_M5ERR_MASK | \
         I5100_FERR_NF_MEM_M4ERR_MASK | \
         I5100_FERR_NF_MEM_M1ERR_MASK)
#define I5100_NERR_NF_MEM       0xa4    /* MC Next Non-Fatal Errors */
#define I5100_EMASK_MEM         0xa8    /* MC Error Mask Register */
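/*
 * FERR_NF_MEM latches the first non-fatal memory error seen and NERR_NF_MEM
 * records any that follow; the two appear to share the M1..M16 bit layout
 * above, which is why a single ANY mask (and, below, a single accessor)
 * serves for either register.
 */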
/* device 21 and 22, func 0: per-channel memory-map registers */
#define I5100_MTR_0     0x154   /* Memory Technology Registers 0-3 */
#define I5100_DMIR      0x15c   /* DIMM Interleave Range */
#define I5100_VALIDLOG  0x18c   /* Valid Log Markers */
#define I5100_NRECMEMA  0x190   /* Non-Recoverable Memory Error Log Reg A */
#define I5100_NRECMEMB  0x194   /* Non-Recoverable Memory Error Log Reg B */
#define I5100_REDMEMA   0x198   /* Recoverable Memory Data Error Log Reg A */
#define I5100_REDMEMB   0x19c   /* Recoverable Memory Data Error Log Reg B */
#define I5100_RECMEMA   0x1a0   /* Recoverable Memory Error Log Reg A */
#define I5100_RECMEMB   0x1a4   /* Recoverable Memory Error Log Reg B */
#define I5100_MTR_4     0x1b0   /* Memory Technology Registers 4,5 */
/* bit-field accessors for the registers above */
static inline u32 i5100_mc_scrben(u32 mc)
{
        return mc >> 7 & 1;
}

static inline u32 i5100_mc_errdeten(u32 mc)
{
        return mc >> 5 & 1;
}

static inline u32 i5100_mc_scrbdone(u32 mc)
{
        return mc >> 4 & 1;
}
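/*
 * SCRBEN (I5100_MC bit 7) switches the patrol scrubber on and SCRBDONE
 * (bit 4) is raised by the hardware once a full scrub pass has completed;
 * the refresh worker further down re-arms SCRBEN whenever it sees SCRBDONE.
 */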
static inline u16 i5100_spddata_rdo(u16 a)
{
        return a >> 15 & 1;
}

static inline u16 i5100_spddata_sbe(u16 a)
{
        return a >> 13 & 1;
}

static inline u16 i5100_spddata_busy(u16 a)
{
        return a >> 12 & 1;
}

static inline u16 i5100_spddata_data(u16 a)
{
        return a & ((1 << 8) - 1);
}
static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa,
                                      u32 ba, u32 data, u32 cmd)
{
        return ((dti & ((1 << 4) - 1)) << 28) |
               ((ckovrd & 1) << 27) |
               ((sa & ((1 << 3) - 1)) << 24) |
               ((ba & ((1 << 8) - 1)) << 16) |
               ((data & ((1 << 8) - 1)) << 8) |
               (cmd & 1);
}
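/*
 * SPDCMD layout as implied by the shifts above: DTI in bits 31:28, clock
 * override in bit 27, SMBus slave address in bits 26:24, byte address in
 * 23:16, write data in 15:8 and the command bit in bit 0.  The SPD-read
 * helper below issues i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr, 0, 0),
 * i.e. DTI 0xa (EEPROM), a slave address selecting the DIMM slot, and a
 * command bit of 0 for a read.
 */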
static inline u16 i5100_tolm_tolm(u16 a)
{
        return a >> 12 & ((1 << 4) - 1);
}
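/*
 * TOLM keeps only bits 15:12 of the top-of-low-memory boundary; the
 * interleave setup below scales the 4-bit field by 256 MiB, so a raw
 * value of 0xc, for example, corresponds to a 3 GiB boundary.
 */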
static inline u16 i5100_mir_limit(u16 a)
{
        return a >> 4 & ((1 << 12) - 1);
}

static inline u16 i5100_mir_way1(u16 a)
{
        return a >> 1 & 1;
}

static inline u16 i5100_mir_way0(u16 a)
{
        return a & 1;
}
static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
{
        return a >> 28 & 1;
}

static inline u32 i5100_ferr_nf_mem_any(u32 a)
{
        return a & I5100_FERR_NF_MEM_ANY_MASK;
}

static inline u32 i5100_nerr_nf_mem_any(u32 a)
{
        /* NERR shares the FERR bit layout */
        return i5100_ferr_nf_mem_any(a);
}
static inline u32 i5100_dmir_limit(u32 a)
{
        return a >> 16 & ((1 << 11) - 1);
}

static inline u32 i5100_dmir_rank(u32 a, u32 i)
{
        return a >> (4 * i) & ((1 << 2) - 1);
}
static inline u16 i5100_mtr_present(u16 a)
{
        return a >> 10 & 1;
}

static inline u16 i5100_mtr_ethrottle(u16 a)
{
        return a >> 9 & 1;
}

static inline u16 i5100_mtr_width(u16 a)
{
        return a >> 8 & 1;
}

static inline u16 i5100_mtr_numbank(u16 a)
{
        return a >> 6 & 1;
}

static inline u16 i5100_mtr_numrow(u16 a)
{
        return a >> 2 & ((1 << 2) - 1);
}

static inline u16 i5100_mtr_numcol(u16 a)
{
        return a & ((1 << 2) - 1);
}
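/*
 * The MTR fields are biased encodings; i5100_init_mtr() below decodes
 * them as device width = 4 + 4 * width (x4 or x8), bank address bits =
 * 2 + numbank, row bits = 13 + numrow and column bits = 10 + numcol,
 * so a rank covers 2^(row + col + bank) locations on the 8-byte bus.
 */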
static inline u32 i5100_validlog_redmemvalid(u32 a)
{
        return a >> 2 & 1;
}

static inline u32 i5100_validlog_recmemvalid(u32 a)
{
        return a >> 1 & 1;
}

static inline u32 i5100_validlog_nrecmemvalid(u32 a)
{
        return a & 1;
}
static inline u32 i5100_nrecmema_merr(u32 a)
{
        return a >> 15 & ((1 << 5) - 1);
}

static inline u32 i5100_nrecmema_bank(u32 a)
{
        return a >> 12 & ((1 << 3) - 1);
}

static inline u32 i5100_nrecmema_rank(u32 a)
{
        return a >> 8 & ((1 << 3) - 1);
}

static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
{
        return a & ((1 << 8) - 1);
}

static inline u32 i5100_nrecmemb_cas(u32 a)
{
        return a >> 16 & ((1 << 13) - 1);
}

static inline u32 i5100_nrecmemb_ras(u32 a)
{
        return a & ((1 << 16) - 1);
}

static inline u32 i5100_redmemb_ecc_locator(u32 a)
{
        return a & ((1 << 18) - 1);
}
/* the recoverable error log registers share the non-recoverable layout,
 * so the REC accessors simply reuse the NREC decoders above */
static inline u32 i5100_recmema_merr(u32 a)
{
        return i5100_nrecmema_merr(a);
}

static inline u32 i5100_recmema_bank(u32 a)
{
        return i5100_nrecmema_bank(a);
}

static inline u32 i5100_recmema_rank(u32 a)
{
        return i5100_nrecmema_rank(a);
}

static inline u32 i5100_recmema_dm_buf_id(u32 a)
{
        return i5100_nrecmema_dm_buf_id(a);
}

static inline u32 i5100_recmemb_cas(u32 a)
{
        return i5100_nrecmemb_cas(a);
}

static inline u32 i5100_recmemb_ras(u32 a)
{
        return i5100_nrecmemb_ras(a);
}
/* some generic limits */
#define I5100_MAX_RANKS_PER_CHAN        6
#define I5100_CHANNELS                  2
#define I5100_MAX_RANKS_PER_DIMM        4
#define I5100_DIMM_ADDR_LINES           (6 - 3) /* 64 bits / 8 bits per byte */
#define I5100_MAX_DIMM_SLOTS_PER_CHAN   4
#define I5100_MAX_RANK_INTERLEAVE       4
#define I5100_MAX_DMIRS                 5
#define I5100_SCRUB_REFRESH_RATE        (5 * 60 * HZ)
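/*
 * I5100_SCRUB_REFRESH_RATE is the period of the scrub-maintenance worker
 * below: every five minutes it checks whether the current scrub pass has
 * finished and, if so, kicks off the next one.
 */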
/* map a (chan, rank) pair back to the DIMM slot providing that rank */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
                              int chan, int rank)
{
        const struct i5100_priv *priv = mci->pvt_info;
        int i, j;

        for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
                const int numrank = priv->dimm_numrank[chan][i];

                for (j = 0; j < numrank; j++)
                        if (priv->dimm_csmap[i][j] == rank)
                                return i;
        }

        return -1;
}
/* return the message for the lowest error bit set in a FERR/NERR mask */
static const char *i5100_err_msg(unsigned err)
{
        static const char *merrs[] = {
                "unknown", /* 0 */
                "uncorrectable data ECC on replay", /* 1 */
                "unknown", /* 2 */
                "unknown", /* 3 */
                "aliased uncorrectable demand data ECC", /* 4 */
                "aliased uncorrectable spare-copy data ECC", /* 5 */
                "aliased uncorrectable patrol data ECC", /* 6 */
                "unknown", /* 7 */
                "non-aliased uncorrectable demand data ECC", /* 8 */
                "non-aliased uncorrectable spare-copy data ECC", /* 9 */
                "non-aliased uncorrectable patrol data ECC", /* 10 */
                "unknown", /* 11 */
                "correctable demand data ECC", /* 12 */
                "correctable spare-copy data ECC", /* 13 */
                "correctable patrol data ECC", /* 14 */
                "unknown", /* 15 */
                "SPD protocol error", /* 16 */
                "unknown", /* 17 */
                "spare copy initiated", /* 18 */
                "spare copy completed", /* 19 */
        };
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(merrs); i++)
                if (1 << i & err)
                        return merrs[i];

        return "none";
}
/* csrows are numbered chan * ranksperchan + rank */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
        const struct i5100_priv *priv = mci->pvt_info;

        return csrow % priv->ranksperchan;
}

static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
{
        const struct i5100_priv *priv = mci->pvt_info;

        return csrow / priv->ranksperchan;
}
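/*
 * Example of that numbering with ranksperchan == 6: csrow 9 decodes to
 * chan = 9 / 6 = 1 and rank = 9 % 6 = 3, i.e. the fourth rank of the
 * second channel.
 */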
static void i5100_handle_ce(struct mem_ctl_info *mci, int chan,
                            unsigned bank, unsigned rank,
                            unsigned long syndrome, unsigned cas,
                            unsigned ras, const char *msg)
{
        char detail[80];

        snprintf(detail, sizeof(detail),
                 "bank %u, cas %u, ras %u\n", bank, cas, ras);

        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                             0, 0, syndrome, chan, rank, -1, msg, detail);
}

/* identical to the CE path apart from the event type reported */
static void i5100_handle_ue(struct mem_ctl_info *mci, int chan,
                            unsigned bank, unsigned rank,
                            unsigned long syndrome, unsigned cas,
                            unsigned ras, const char *msg)
{
        char detail[80];

        snprintf(detail, sizeof(detail),
                 "bank %u, cas %u, ras %u\n", bank, cas, ras);

        edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
                             0, 0, syndrome, chan, rank, -1, msg, detail);
}
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
                           u32 ferr, u32 nerr)
{
        struct i5100_priv *priv = mci->pvt_info;
        struct pci_dev *pdev = chan ? priv->ch1mm : priv->ch0mm;
        u32 dw, dw2;
        unsigned syndrome = 0;
        unsigned ecc_loc = 0;
        unsigned merr, bank, rank, cas, ras;

        pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);

        if (i5100_validlog_redmemvalid(dw)) {
                pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
                syndrome = dw2;
                pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
                ecc_loc = i5100_redmemb_ecc_locator(dw2);
        }

        if (i5100_validlog_recmemvalid(dw)) {
                const char *msg;

                pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
                merr = i5100_recmema_merr(dw2);
                bank = i5100_recmema_bank(dw2);
                rank = i5100_recmema_rank(dw2);

                pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
                cas = i5100_recmemb_cas(dw2);
                ras = i5100_recmemb_ras(dw2);

                if (!merr)
                        msg = i5100_err_msg(ferr);
                else
                        msg = i5100_err_msg(nerr);

                i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
        }

        if (i5100_validlog_nrecmemvalid(dw)) {
                const char *msg;

                pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
                merr = i5100_nrecmema_merr(dw2);
                bank = i5100_nrecmema_bank(dw2);
                rank = i5100_nrecmema_rank(dw2);

                pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
                cas = i5100_nrecmemb_cas(dw2);
                ras = i5100_nrecmemb_ras(dw2);

                if (!merr)
                        msg = i5100_err_msg(ferr);
                else
                        msg = i5100_err_msg(nerr);

                i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
        }

        /* writing the markers back clears the logged events */
        pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
}
static void i5100_check_error(struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        u32 dw, dw2;

        pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
        if (i5100_ferr_nf_mem_any(dw)) {
                pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);

                i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
                               i5100_ferr_nf_mem_any(dw),
                               i5100_nerr_nf_mem_any(dw2));

                /* clear the error flags we just handled */
                pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2);
                pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
        }
}
/* The hardware completes one full scrub pass and then sets SCRBDONE, so
 * this delayed work re-arms SCRBEN periodically to keep scrubbing going. */
static void i5100_refresh_scrubbing(struct work_struct *work)
{
        struct delayed_work *i5100_scrubbing = to_delayed_work(work);
        struct i5100_priv *priv = container_of(i5100_scrubbing,
                                               struct i5100_priv,
                                               i5100_scrubbing);
        u32 dw;

        pci_read_config_dword(priv->mc, I5100_MC, &dw);

        if (priv->scrub_enable) {
                pci_read_config_dword(priv->mc, I5100_MC, &dw);

                if (i5100_mc_scrbdone(dw)) {
                        dw |= I5100_MC_SCRBEN_MASK;
                        pci_write_config_dword(priv->mc, I5100_MC, dw);
                        pci_read_config_dword(priv->mc, I5100_MC, &dw);
                }

                schedule_delayed_work(&(priv->i5100_scrubbing),
                                      I5100_SCRUB_REFRESH_RATE);
        }
}

static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
{
        struct i5100_priv *priv = mci->pvt_info;
        u32 dw;

        pci_read_config_dword(priv->mc, I5100_MC, &dw);
        if (bandwidth) {
                priv->scrub_enable = 1;
                dw |= I5100_MC_SCRBEN_MASK;
                schedule_delayed_work(&(priv->i5100_scrubbing),
                                      I5100_SCRUB_REFRESH_RATE);
        } else {
                priv->scrub_enable = 0;
                dw &= ~I5100_MC_SCRBEN_MASK;
                cancel_delayed_work(&(priv->i5100_scrubbing));
        }
        pci_write_config_dword(priv->mc, I5100_MC, dw);

        pci_read_config_dword(priv->mc, I5100_MC, &dw);

        bandwidth = 5900000 * i5100_mc_scrben(dw);

        return 0;
}
static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        u32 dw;

        pci_read_config_dword(priv->mc, I5100_MC, &dw);

        return 5900000 * i5100_mc_scrben(dw);
}
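/*
 * There is no scrub-rate knob on this part: scrubbing is either off or
 * running at full speed, so both scrub-rate hooks report a fixed 5900000
 * (the driver's estimate of the scrub bandwidth) when SCRBEN is set and
 * 0 when it is clear.
 */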
static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow)
{
        struct i5100_priv *priv = mci->pvt_info;
        const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
        const unsigned chan = i5100_csrow_to_chan(mci, csrow);
        unsigned addr_lines;

        /* dimm present? */
        if (!priv->mtr[chan][chan_rank].present)
                return 0ULL;

        addr_lines = I5100_DIMM_ADDR_LINES +
                priv->mtr[chan][chan_rank].numcol +
                priv->mtr[chan][chan_rank].numrow +
                priv->mtr[chan][chan_rank].numbank;

        return (unsigned long)
                ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
}
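/*
 * Worked example: a present rank decoded as numcol = 10, numrow = 14 and
 * numbank = 2 gives addr_lines = 3 + 10 + 14 + 2 = 29, i.e. a 512 MiB
 * rank, which is 131072 pages with 4 KiB pages.
 */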
static void i5100_init_mtr(struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
        int i;

        for (i = 0; i < I5100_CHANNELS; i++) {
                int j;
                struct pci_dev *pdev = mms[i];

                for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
                        /* ranks 0-3 live in MTR_0, ranks 4-5 in MTR_4 */
                        const unsigned addr =
                                (j < 4) ? I5100_MTR_0 + j * 2 :
                                          I5100_MTR_4 + (j - 4) * 2;
                        u16 w;

                        pci_read_config_word(pdev, addr, &w);

                        priv->mtr[i][j].present = i5100_mtr_present(w);
                        priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
                        priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
                        priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
                        priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
                        priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
                }
        }
}
/* read a byte from a DIMM's SPD EEPROM through the controller's
 * SPDCMD/SPDDATA pair; returns 0 on success, -1 on failure */
static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
                               u8 ch, u8 slot, u8 addr, u8 *byte)
{
        struct i5100_priv *priv = mci->pvt_info;
        unsigned long et;
        u16 w;

        pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
        if (i5100_spddata_busy(w))
                return -1;

        pci_write_config_dword(priv->mc, I5100_SPDCMD,
                               i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
                                                   0, 0));

        /* wait up to 100ms for the command to finish */
        et = jiffies + HZ / 10;
        udelay(100);
        while (time_before(jiffies, et)) {
                pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
                if (!i5100_spddata_busy(w))
                        break;
                udelay(100);
        }

        if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
                return -1;

        *byte = i5100_spddata_data(w);

        return 0;
}
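/*
 * Usage sketch: reading SPD byte 5 of the DIMM in channel 1, slot 0 means
 * an SPD read with slave address 1 * 4 + 0 = 4; once BUSY clears, the
 * byte comes back in the low eight bits of SPDDATA.
 */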
static void i5100_init_dimm_layout(struct pci_dev *pdev,
                                   struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        int i, j;

        for (i = 0; i < I5100_CHANNELS; i++)
                for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
                        u8 rank;

                        /* SPD byte 5 encodes the DIMM's rank count */
                        if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
                                priv->dimm_numrank[i][j] = 0;
                        else
                                priv->dimm_numrank[i][j] = (rank & 3) + 1;
                }

        i5100_init_dimm_csmap(mci);
}
static void i5100_init_interleaving(struct pci_dev *pdev,
                                    struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
        u16 w;
        int i;

        pci_read_config_word(pdev, I5100_TOLM, &w);
        priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;

        pci_read_config_word(pdev, I5100_MIR0, &w);
        priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
        priv->mir[0].way[1] = i5100_mir_way1(w);
        priv->mir[0].way[0] = i5100_mir_way0(w);

        pci_read_config_word(pdev, I5100_MIR1, &w);
        priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
        priv->mir[1].way[1] = i5100_mir_way1(w);
        priv->mir[1].way[0] = i5100_mir_way0(w);
        /* read the per-channel DMIRs */
        for (i = 0; i < I5100_CHANNELS; i++) {
                int j;

                for (j = 0; j < I5100_MAX_DMIRS; j++) {
                        u32 dw;
                        int k;

                        pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);

                        priv->dmir[i][j].limit =
                                (u64) i5100_dmir_limit(dw) << 28;
                        for (k = 0; k < I5100_MAX_RANK_INTERLEAVE; k++)
                                priv->dmir[i][j].rank[k] =
                                        i5100_dmir_rank(dw, k);
                }
        }
}
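/*
 * Each DMIR packs four rank-interleave entries into its low 16 bits, one
 * 4-bit field per entry with the rank number in the low 2 bits; that is
 * what i5100_dmir_rank()'s "a >> (4 * i) & 3" pulls out above.
 */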
static void i5100_init_csrows(struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        int i;

        for (i = 0; i < mci->tot_dimms; i++) {
                struct dimm_info *dimm;
                const unsigned long npages = i5100_npages(mci, i);
                const unsigned chan = i5100_csrow_to_chan(mci, i);
                const unsigned rank = i5100_csrow_to_rank(mci, i);

                if (!npages)
                        continue;

                dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
                                     chan, rank, 0);

                dimm->nr_pages = npages;
                dimm->grain = 32;
                dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
                              DEV_X4 : DEV_X8;
                dimm->mtype = MEM_RDDR2;
                dimm->edac_mode = EDAC_SECDED;
                snprintf(dimm->label, sizeof(dimm->label), "DIMM%u",
                         i5100_rank_to_slot(mci, chan, rank));

                edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
                         chan, rank, (long)PAGES_TO_MiB(npages));
        }
}
static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct mem_ctl_info *mci;
        struct edac_mc_layer layers[2];
        struct i5100_priv *priv;
        struct pci_dev *ch0mm, *ch1mm;
        int ret, ranksperch;
        u32 dw;

        ret = pci_enable_device(pdev);
        if (ret)
                return -ENODEV;

        /* bail out unless the BIOS left ECC error detection enabled */
        pci_read_config_dword(pdev, I5100_MC, &dw);
        if (!i5100_mc_errdeten(dw)) {
                ret = -ENODEV;
                goto bail_pdev;
        }

        /* the 48GB_Mode strap (MS bit 8) selects 6 instead of 4 ranks/chan */
        pci_read_config_dword(pdev, I5100_MS, &dw);
        ranksperch = !!(dw & (1 << 8)) * 2 + 4;

        /* ... locate and enable ch0mm and ch1mm, the channel 0/1 memory-map
         * devices; failures unwind with "goto bail_disable_ch0;" etc. ... */

        layers[0].type = EDAC_MC_LAYER_CHANNEL;
        layers[0].size = 2;
        layers[0].is_virt_csrow = false;
        layers[1].type = EDAC_MC_LAYER_SLOT;
        layers[1].size = ranksperch;
        layers[1].is_virt_csrow = true;
        mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*priv));
        if (!mci) {
                ret = -ENOMEM;
                goto bail_disable_ch1;
        }

        mci->pdev = &pdev->dev;
        priv = mci->pvt_info;
        priv->ranksperchan = ranksperch;
        priv->mc = pdev;
        priv->ch0mm = ch0mm;
        priv->ch1mm = ch1mm;

        INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);

        /* if the BIOS already enabled scrubbing, keep it refreshed */
        pci_read_config_dword(pdev, I5100_MC, &dw);
        if (i5100_mc_scrben(dw)) {
                priv->scrub_enable = 1;
                schedule_delayed_work(&(priv->i5100_scrubbing),
                                      I5100_SCRUB_REFRESH_RATE);
        }

        i5100_init_dimm_layout(pdev, mci);
        i5100_init_interleaving(pdev, mci);

        mci->mod_name = "i5100_edac.c";
        mci->mod_ver = "not versioned";
        mci->ctl_name = "i5100";
        mci->edac_check = i5100_check_error;
        mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
        mci->get_sdram_scrub_rate = i5100_get_scrub_rate;

        i5100_init_csrows(mci);

        /* ... edac_mc_add_mc() and the bail_* unwind labels follow ... */
}
static struct pci_driver i5100_driver = {
        .name = KBUILD_BASENAME,
        .probe = i5100_init_one,
        .remove = i5100_remove_one,
        .id_table = i5100_pci_tbl,
};
static int __init i5100_init(void)
{
        int pci_rc;

        pci_rc = pci_register_driver(&i5100_driver);

        return (pci_rc < 0) ? pci_rc : 0;
}
static void __exit i5100_exit(void)
{
        pci_unregister_driver(&i5100_driver);
}

module_init(i5100_init);
module_exit(i5100_exit);