 #define I10NM_GET_DIMMMTR(m, i, j)	\
 	readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
 	(i) * (m)->chan_mmio_sz + (j) * 4)
-#define I10NM_GET_MCDDRTCFG(m, i, j)	\
+#define I10NM_GET_MCDDRTCFG(m, i)	\
 	readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
-	(i) * (m)->chan_mmio_sz + (j) * 4)
+	(i) * (m)->chan_mmio_sz)
 #define I10NM_GET_MCMTR(m, i)		\
 	readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
 	(i) * (m)->chan_mmio_sz)
@@ -77,28 +77,42 @@ static int retry_rd_err_log;
 
 static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
 static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
+static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
+static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
 static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
 static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
+static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
+static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
+static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
 
-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable)
+static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
+				      u32 *offsets_scrub, u32 *offsets_demand,
+				      u32 *offsets_demand2)
 {
-	u32 s, d;
-
-	if (!imc->mbase)
-		return;
+	u32 s, d, d2;
 
-	s = I10NM_GET_REG32(imc, chan, res_cfg->offsets_scrub[0]);
-	d = I10NM_GET_REG32(imc, chan, res_cfg->offsets_demand[0]);
+	s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
+	d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
+	if (offsets_demand2)
+		d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
 
 	if (enable) {
 		/* Save default configurations */
 		imc->chan[chan].retry_rd_err_log_s = s;
 		imc->chan[chan].retry_rd_err_log_d = d;
+		if (offsets_demand2)
+			imc->chan[chan].retry_rd_err_log_d2 = d2;
 
 		s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
 		s |= RETRY_RD_ERR_LOG_EN;
 		d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
 		d |= RETRY_RD_ERR_LOG_EN;
+
+		if (offsets_demand2) {
+			d2 &= ~RETRY_RD_ERR_LOG_UC;
+			d2 |= RETRY_RD_ERR_LOG_NOOVER;
+			d2 |= RETRY_RD_ERR_LOG_EN;
+		}
 	} else {
 		/* Restore default configurations */
 		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
@@ -113,23 +127,55 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable
 			d |= RETRY_RD_ERR_LOG_NOOVER;
 		if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
 			d &= ~RETRY_RD_ERR_LOG_EN;
+
+		if (offsets_demand2) {
+			if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
+				d2 |= RETRY_RD_ERR_LOG_UC;
+			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
+				d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
+			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
+				d2 &= ~RETRY_RD_ERR_LOG_EN;
+		}
 	}
 
-	I10NM_SET_REG32(imc, chan, res_cfg->offsets_scrub[0], s);
-	I10NM_SET_REG32(imc, chan, res_cfg->offsets_demand[0], d);
+	I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
+	I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
+	if (offsets_demand2)
+		I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
 }
 
 static void enable_retry_rd_err_log(bool enable)
 {
+	struct skx_imc *imc;
 	struct skx_dev *d;
 	int i, j;
 
 	edac_dbg(2, "\n");
 
 	list_for_each_entry(d, i10nm_edac_list, list)
-		for (i = 0; i < I10NM_NUM_IMC; i++)
-			for (j = 0; j < I10NM_NUM_CHANNELS; j++)
-				__enable_retry_rd_err_log(&d->imc[i], j, enable);
+		for (i = 0; i < I10NM_NUM_IMC; i++) {
+			imc = &d->imc[i];
+			if (!imc->mbase)
+				continue;
+
+			for (j = 0; j < I10NM_NUM_CHANNELS; j++) {
+				if (imc->hbm_mc) {
+					__enable_retry_rd_err_log(imc, j, enable,
+								  res_cfg->offsets_scrub_hbm0,
+								  res_cfg->offsets_demand_hbm0,
+								  NULL);
+					__enable_retry_rd_err_log(imc, j, enable,
+								  res_cfg->offsets_scrub_hbm1,
+								  res_cfg->offsets_demand_hbm1,
+								  NULL);
+				} else {
+					__enable_retry_rd_err_log(imc, j, enable,
+								  res_cfg->offsets_scrub,
+								  res_cfg->offsets_demand,
+								  res_cfg->offsets_demand2);
+				}
+			}
+		}
 }
 
 static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
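Note: the refactored __enable_retry_rd_err_log() now saves and restores a third register value, which assumes a matching field in struct skx_channel (skx_common.h, not part of this hunk). A minimal sketch of the assumed companion change, showing only the retry-log bookkeeping members:

struct skx_channel {
	/* ... existing members (cdev, dimms[], ...) unchanged ... */
	u32 retry_rd_err_log_s;		/* saved scrub log configuration */
	u32 retry_rd_err_log_d;		/* saved demand log configuration */
	u32 retry_rd_err_log_d2;	/* saved second demand log configuration (new) */
};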
@@ -138,35 +184,86 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
 	struct skx_imc *imc = &res->dev->imc[res->imc];
 	u32 log0, log1, log2, log3, log4;
 	u32 corr0, corr1, corr2, corr3;
+	u32 lxg0, lxg1, lxg3, lxg4;
+	u32 *xffsets = NULL;
 	u64 log2a, log5;
+	u64 lxg2a, lxg5;
 	u32 *offsets;
-	int n;
+	int n, pch;
 
 	if (!imc->mbase)
 		return;
 
-	offsets = scrub_err ? res_cfg->offsets_scrub : res_cfg->offsets_demand;
+	if (imc->hbm_mc) {
+		pch = res->cs & 1;
+
+		if (pch)
+			offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
+					      res_cfg->offsets_demand_hbm1;
+		else
+			offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
+					      res_cfg->offsets_demand_hbm0;
+	} else {
+		if (scrub_err) {
+			offsets = res_cfg->offsets_scrub;
+		} else {
+			offsets = res_cfg->offsets_demand;
+			xffsets = res_cfg->offsets_demand2;
+		}
+	}
 
 	log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
 	log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
 	log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
 	log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
 	log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
 
+	if (xffsets) {
+		lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
+		lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
+		lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
+		lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
+		lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
+	}
+
 	if (res_cfg->type == SPR) {
 		log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
-		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx]",
+		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
 			     log0, log1, log2a, log3, log4, log5);
+
+		if (len - n > 0) {
+			if (xffsets) {
+				lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
+				n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
+					      lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
+			} else {
+				n += snprintf(msg + n, len - n, "]");
+			}
+		}
 	} else {
 		log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
 		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
 			     log0, log1, log2, log3, log4, log5);
 	}
 
-	corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
-	corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
-	corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
-	corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
+	if (imc->hbm_mc) {
+		if (pch) {
+			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
+			corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
+			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
+			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
+		} else {
+			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
+			corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
+			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
+			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
+		}
+	} else {
+		corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
+		corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
+		corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
+		corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
+	}
 
 	if (len - n > 0)
 		snprintf(msg + n, len - n,
@@ -177,9 +274,16 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
 			 corr3 & 0xffff, corr3 >> 16);
 
 	/* Clear status bits */
-	if (retry_rd_err_log == 2 && (log0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
-		log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
-		I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+	if (retry_rd_err_log == 2) {
+		if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
+			log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
+			I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+		}
+
+		if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
+			lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
+			I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
+		}
 	}
 }
 
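Note: on an HBM memory controller the low bit of the decoded chip select selects the pseudo-channel, and each pseudo-channel has its own register bank, hence the *_hbm0/*_hbm1 offset tables and the 0x28xx/0x2cxx corrected-error counter addresses above. A hypothetical helper, not part of this patch, illustrating the table selection (it assumes the driver's file-scope res_cfg pointer):

/* Hypothetical: pick the retry_rd_err_log offset table for an HBM
 * pseudo-channel, where pch = res->cs & 1.
 */
static u32 *hbm_retry_log_offsets(bool scrub_err, int pch)
{
	if (pch)
		return scrub_err ? res_cfg->offsets_scrub_hbm1
				 : res_cfg->offsets_demand_hbm1;

	return scrub_err ? res_cfg->offsets_scrub_hbm0
			 : res_cfg->offsets_demand_hbm0;
}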
@@ -198,11 +302,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
 	if (unlikely(pci_enable_device(pdev) < 0)) {
 		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
 			 bus, dev, fun);
+		pci_dev_put(pdev);
 		return NULL;
 	}
 
-	pci_dev_get(pdev);
-
 	return pdev;
 }
 
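Note: pci_get_domain_bus_and_slot() returns the device with its reference count already raised, so the removed pci_dev_get() was redundant, and the old error path leaked that reference whenever pci_enable_device() failed. A sketch of the resulting wrapper, assuming the portion of the function above this hunk is unchanged:

static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
					   unsigned int dev, unsigned int fun)
{
	struct pci_dev *pdev;

	/* Returns a referenced pci_dev (or NULL); the reference is ours to drop. */
	pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
	if (!pdev)
		return NULL;

	if (unlikely(pci_enable_device(pdev) < 0)) {
		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
			 bus, dev, fun);
		pci_dev_put(pdev);	/* don't leak the reference on failure */
		return NULL;
	}

	return pdev;	/* the caller now owns the reference */
}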
@@ -411,7 +514,12 @@ static struct res_config spr_cfg = {
 	.sad_all_devfn = PCI_DEVFN(10, 0),
 	.sad_all_offset = 0x300,
 	.offsets_scrub = offsets_scrub_spr,
+	.offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
+	.offsets_scrub_hbm1 = offsets_scrub_spr_hbm1,
 	.offsets_demand = offsets_demand_spr,
+	.offsets_demand2 = offsets_demand2_spr,
+	.offsets_demand_hbm0 = offsets_demand_spr_hbm0,
+	.offsets_demand_hbm1 = offsets_demand_spr_hbm1,
 };
 
 static const struct x86_cpu_id i10nm_cpuids[] = {
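Note: the new spr_cfg initializers assume struct res_config (skx_common.h, not shown here) gains matching members. A sketch of the assumed additions, listing only the retry_rd_err_log offset-table pointers:

struct res_config {
	/* ... existing members unchanged ... */
	u32 *offsets_scrub;		/* scrub error log register offsets */
	u32 *offsets_scrub_hbm0;	/* HBM pseudo-channel 0 scrub log (new) */
	u32 *offsets_scrub_hbm1;	/* HBM pseudo-channel 1 scrub log (new) */
	u32 *offsets_demand;		/* demand-read error log register offsets */
	u32 *offsets_demand2;		/* second demand log set on SPR (new) */
	u32 *offsets_demand_hbm0;	/* HBM pseudo-channel 0 demand log (new) */
	u32 *offsets_demand_hbm1;	/* HBM pseudo-channel 1 demand log (new) */
};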
@@ -421,6 +529,7 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
 	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X,	X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
 	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D,	X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
 	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &spr_cfg),
+	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &spr_cfg),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -450,10 +559,10 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
 
 		ndimms = 0;
 		amap = I10NM_GET_AMAP(imc, i);
+		mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
 		for (j = 0; j < imc->num_dimms; j++) {
 			dimm = edac_get_dimm(mci, i, j, 0);
 			mtr = I10NM_GET_DIMMMTR(imc, i, j);
-			mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
 			edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
 				 mtr, mcddrtcfg, imc->mc, i, j);
 