diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index acf806c87673cf..de0ef21e15011f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -440,9 +440,8 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->umc.ras_funcs &&
-	    adev->umc.ras_funcs->ras_late_init) {
-		r = adev->umc.ras_funcs->ras_late_init(adev);
+	if (adev->umc.ras && adev->umc.ras->ras_block.ras_late_init) {
+		r = adev->umc.ras->ras_block.ras_late_init(adev, NULL);
 		if (r)
 			return r;
 	}
@@ -496,9 +495,8 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
 
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
 {
-	if (adev->umc.ras_funcs &&
-	    adev->umc.ras_funcs->ras_fini)
-		adev->umc.ras_funcs->ras_fini(adev);
+	if (adev->umc.ras && adev->umc.ras->ras_block.ras_fini)
+		adev->umc.ras->ras_block.ras_fini(adev);
 
 	if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_fini)
 		adev->mmhub.ras->ras_block.ras_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 6d1ca9e9795bb3..fba1c415a2a833 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -939,24 +939,24 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d
 	 */
 	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
 	if (ret == -EOPNOTSUPP) {
-		if (adev->umc.ras_funcs &&
-		    adev->umc.ras_funcs->query_ras_error_count)
-			adev->umc.ras_funcs->query_ras_error_count(adev, err_data);
+		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
 
 		/* umc query_ras_error_address is also responsible for clearing
 		 * error status
 		 */
-		if (adev->umc.ras_funcs &&
-		    adev->umc.ras_funcs->query_ras_error_address)
-			adev->umc.ras_funcs->query_ras_error_address(adev, err_data);
+		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
+			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
 	} else if (!ret) {
-		if (adev->umc.ras_funcs &&
-		    adev->umc.ras_funcs->ecc_info_query_ras_error_count)
-			adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data);
+		if (adev->umc.ras &&
+		    adev->umc.ras->ecc_info_query_ras_error_count)
+			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
 
-		if (adev->umc.ras_funcs &&
-		    adev->umc.ras_funcs->ecc_info_query_ras_error_address)
-			adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data);
+		if (adev->umc.ras &&
+		    adev->umc.ras->ecc_info_query_ras_error_address)
+			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
 	}
 }
 
@@ -2412,12 +2412,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 	}
 	else if (adev->df.funcs &&
 	    adev->df.funcs->query_ras_poison_mode &&
-	    adev->umc.ras_funcs &&
-	    adev->umc.ras_funcs->query_ras_poison_mode) {
+	    adev->umc.ras &&
+	    adev->umc.ras->query_ras_poison_mode) {
 		df_poison =
 			adev->df.funcs->query_ras_poison_mode(adev);
 		umc_poison =
-			adev->umc.ras_funcs->query_ras_poison_mode(adev);
+			adev->umc.ras->query_ras_poison_mode(adev);
 		/* Only poison is set in both DF and UMC, we can support it */
 		if (df_poison && umc_poison)
 			con->poison_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index e81ce465ff3abd..b4c68c09e0714f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -35,12 +35,12 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
 	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
 	if (ret == -EOPNOTSUPP) {
-		if (adev->umc.ras_funcs &&
-		    adev->umc.ras_funcs->query_ras_error_count)
-			adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
+		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
 
-		if (adev->umc.ras_funcs &&
-		    adev->umc.ras_funcs->query_ras_error_address &&
+		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
 		    adev->umc.max_ras_err_cnt_per_query) {
 			err_data->err_addr =
 				kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -56,15 +56,15 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
 			/* umc query_ras_error_address is also responsible for clearing
 			 * error status
 			 */
-			adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
+			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
 		}
 	} else if (!ret) {
-		if (adev->umc.ras_funcs &&
-		    adev->umc.ras_funcs->ecc_info_query_ras_error_count)
-			adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status);
+		if (adev->umc.ras &&
+		    adev->umc.ras->ecc_info_query_ras_error_count)
+			adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
 
-		if (adev->umc.ras_funcs &&
-		    adev->umc.ras_funcs->ecc_info_query_ras_error_address &&
+		if (adev->umc.ras &&
+		    adev->umc.ras->ecc_info_query_ras_error_address &&
 		    adev->umc.max_ras_err_cnt_per_query) {
 			err_data->err_addr =
 				kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -80,7 +80,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
 			/* umc query_ras_error_address is also responsible for clearing
 			 * error status
 			 */
-			adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status);
+			adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
 		}
 	}
 
@@ -136,7 +136,7 @@ static int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
 }
 
-int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
 	int r;
 	struct ras_fs_if fs_info = {
@@ -172,9 +172,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
 	}
 
 	/* ras init of specific umc version */
-	if (adev->umc.ras_funcs &&
-	    adev->umc.ras_funcs->err_cnt_init)
-		adev->umc.ras_funcs->err_cnt_init(adev);
+	if (adev->umc.ras &&
+	    adev->umc.ras->err_cnt_init)
+		adev->umc.ras->err_cnt_init(adev);
 
 	return 0;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index b72194e8bfe531..195740a6d97d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -20,6 +20,7 @@
  */
 #ifndef __AMDGPU_UMC_H__
 #define __AMDGPU_UMC_H__
+#include "amdgpu_ras.h"
 
 /*
  * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
@@ -40,14 +41,9 @@
 #define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
 #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
 
-struct amdgpu_umc_ras_funcs {
+struct amdgpu_umc_ras {
+	struct amdgpu_ras_block_object ras_block;
 	void (*err_cnt_init)(struct amdgpu_device *adev);
-	int (*ras_late_init)(struct amdgpu_device *adev);
-	void (*ras_fini)(struct amdgpu_device *adev);
-	void (*query_ras_error_count)(struct amdgpu_device *adev,
-				      void *ras_error_status);
-	void (*query_ras_error_address)(struct amdgpu_device *adev,
-					void *ras_error_status);
 	bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
 	void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
 				      void *ras_error_status);
@@ -73,10 +69,10 @@ struct amdgpu_umc {
 	struct ras_common_if *ras_if;
 
 	const struct amdgpu_umc_funcs *funcs;
-	const struct amdgpu_umc_ras_funcs *ras_funcs;
+	struct amdgpu_umc_ras *ras;
 };
 
-int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_info);
 void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
 int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
 		void *ras_error_status,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 38bb42727715d2..5ef4ad28ab26ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -664,11 +664,25 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
 		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
 		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
 		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
-		adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
+		adev->umc.ras = &umc_v8_7_ras;
 		break;
 	default:
 		break;
 	}
+	if (adev->umc.ras) {
+		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
+
+		strcpy(adev->umc.ras->ras_block.name,"umc");
+		adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC;
+
+		/* If don't define special ras_late_init function, use default ras_late_init */
+		if (!adev->umc.ras->ras_block.ras_late_init)
+			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+		/* If don't define special ras_fini function, use default ras_fini */
+		if (!adev->umc.ras->ras_block.ras_fini)
+			adev->umc.ras->ras_block.ras_fini = amdgpu_umc_ras_fini;
+	}
 }
 
 static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 7506e198e6e1b4..3965aae435f87a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1202,7 +1202,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
-		adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
+		adev->umc.ras = &umc_v6_1_ras;
 		break;
 	case IP_VERSION(6, 1, 2):
 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
@@ -1210,7 +1210,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
-		adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
+		adev->umc.ras = &umc_v6_1_ras;
 		break;
 	case IP_VERSION(6, 7, 0):
 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
@@ -1218,7 +1218,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
 		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
 		if (!adev->gmc.xgmi.connected_to_cpu)
-			adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
+			adev->umc.ras = &umc_v6_7_ras;
 		if (1 & adev->smuio.funcs->get_die_id(adev))
 			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
 		else
@@ -1227,6 +1227,21 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 	default:
 		break;
 	}
+
+	if (adev->umc.ras) {
+		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
+
+		strcpy(adev->umc.ras->ras_block.name,"umc");
+		adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC;
+
+		/* If don't define special ras_late_init function, use default ras_late_init */
+		if (!adev->umc.ras->ras_block.ras_late_init)
+			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+		/* If don't define special ras_fini function, use default ras_fini */
+		if (!adev->umc.ras->ras_block.ras_fini)
+			adev->umc.ras->ras_block.ras_fini = amdgpu_umc_ras_fini;
+	}
 }
 
 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 20b44983ac945f..4776301972d49d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -465,10 +465,14 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
 		umc_v6_1_enable_umc_index_mode(adev);
 }
 
-const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs = {
-	.err_cnt_init = umc_v6_1_err_cnt_init,
-	.ras_late_init = amdgpu_umc_ras_late_init,
-	.ras_fini = amdgpu_umc_ras_fini,
+const struct amdgpu_ras_block_hw_ops umc_v6_1_ras_hw_ops = {
 	.query_ras_error_count = umc_v6_1_query_ras_error_count,
 	.query_ras_error_address = umc_v6_1_query_ras_error_address,
 };
+
+struct amdgpu_umc_ras umc_v6_1_ras = {
+	.ras_block = {
+		.hw_ops = &umc_v6_1_ras_hw_ops,
+	},
+	.err_cnt_init = umc_v6_1_err_cnt_init,
+};
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
index 5dc36c730bb2a2..50c632eb4cc6ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
@@ -45,7 +45,7 @@
 /* umc ce count initial value */
 #define UMC_V6_1_CE_CNT_INIT	(UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD)
 
-extern const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs;
+extern struct amdgpu_umc_ras umc_v6_1_ras;
 
 extern const uint32_t
 	umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM];
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index 6dd1e19e8d4323..6953426f0bedff 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -480,11 +480,15 @@ static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev)
 	return true;
 }
 
-const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
-	.ras_late_init = amdgpu_umc_ras_late_init,
-	.ras_fini = amdgpu_umc_ras_fini,
+const struct amdgpu_ras_block_hw_ops umc_v6_7_ras_hw_ops = {
 	.query_ras_error_count = umc_v6_7_query_ras_error_count,
 	.query_ras_error_address = umc_v6_7_query_ras_error_address,
+};
+
+struct amdgpu_umc_ras umc_v6_7_ras = {
+	.ras_block = {
+		.hw_ops = &umc_v6_7_ras_hw_ops,
+	},
 	.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
 	.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
 	.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
index 57f2557e7acab1..1f2edf62537041 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
@@ -43,7 +43,7 @@
 #define UMC_V6_7_TOTAL_CHANNEL_NUM	(UMC_V6_7_CHANNEL_INSTANCE_NUM * UMC_V6_7_UMC_INSTANCE_NUM)
 /* UMC regiser per channel offset */
 #define UMC_V6_7_PER_CHANNEL_OFFSET		0x400
-extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs;
+extern struct amdgpu_umc_ras umc_v6_7_ras;
 extern const uint32_t
 	umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM];
 extern const uint32_t
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index af59a35788e3ee..ff9e1fac616d04 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -324,10 +324,14 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
 	}
 }
 
-const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs = {
-	.err_cnt_init = umc_v8_7_err_cnt_init,
-	.ras_late_init = amdgpu_umc_ras_late_init,
-	.ras_fini = amdgpu_umc_ras_fini,
+const struct amdgpu_ras_block_hw_ops umc_v8_7_ras_hw_ops = {
 	.query_ras_error_count = umc_v8_7_query_ras_error_count,
 	.query_ras_error_address = umc_v8_7_query_ras_error_address,
 };
+
+struct amdgpu_umc_ras umc_v8_7_ras = {
+	.ras_block = {
+		.hw_ops = &umc_v8_7_ras_hw_ops,
+	},
+	.err_cnt_init = umc_v8_7_err_cnt_init,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
index 37e6dc7c28e0d9..dd4993f5f78f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
@@ -44,7 +44,7 @@
 /* umc ce count initial value */
 #define UMC_V8_7_CE_CNT_INIT	(UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
 
-extern const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs;
+extern struct amdgpu_umc_ras umc_v8_7_ras;
 
 extern const uint32_t
 	umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
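Note (not part of the patch): the change replaces the per-IP-version amdgpu_umc_ras_funcs table with a struct amdgpu_umc_ras that embeds the common ras_block object, so generic RAS code dispatches hardware queries through ras_block.hw_ops and falls back to amdgpu_umc_ras_late_init/amdgpu_umc_ras_fini when a UMC version does not install its own callbacks. The standalone C sketch below only illustrates that wiring pattern; all type and function names in it (ras_block_object, umc_ras, default_umc_late_init, and so on) are simplified, hypothetical stand-ins, not the real amdgpu definitions.

/* Illustrative sketch of the unified RAS block wiring, with made-up types. */
#include <stdio.h>
#include <string.h>

struct adev;				/* stand-in for struct amdgpu_device */

/* per-IP-version register access callbacks (models amdgpu_ras_block_hw_ops) */
struct ras_block_hw_ops {
	void (*query_ras_error_count)(struct adev *adev, void *err_data);
	void (*query_ras_error_address)(struct adev *adev, void *err_data);
};

/* common object embedded in every RAS-capable block (models ras_block) */
struct ras_block_object {
	char name[32];
	const struct ras_block_hw_ops *hw_ops;
	int (*ras_late_init)(struct adev *adev, void *ras_info);
	void (*ras_fini)(struct adev *adev);
};

/* UMC wrapper: common object first, UMC-only callbacks after it */
struct umc_ras {
	struct ras_block_object ras_block;
	void (*err_cnt_init)(struct adev *adev);
};

struct adev {
	struct umc_ras *umc_ras;
};

/* default late_init used when the IP version does not supply one */
static int default_umc_late_init(struct adev *adev, void *ras_info)
{
	(void)ras_info;
	if (adev->umc_ras && adev->umc_ras->err_cnt_init)
		adev->umc_ras->err_cnt_init(adev);
	return 0;
}

static void v8_7_query_count(struct adev *adev, void *err_data)
{
	(void)adev; (void)err_data;
	printf("query UMC error count for this IP version\n");
}

static const struct ras_block_hw_ops v8_7_hw_ops = {
	.query_ras_error_count = v8_7_query_count,
};

/* the IP-version file only fills in hw_ops and its own extras */
static struct umc_ras v8_7_ras = {
	.ras_block = { .hw_ops = &v8_7_hw_ops },
};

int main(void)
{
	struct adev adev = { .umc_ras = &v8_7_ras };

	/* wiring analogous to gmc_v*_set_umc_funcs(): name the block and
	 * install default callbacks only where none were provided.
	 */
	strcpy(adev.umc_ras->ras_block.name, "umc");
	if (!adev.umc_ras->ras_block.ras_late_init)
		adev.umc_ras->ras_block.ras_late_init = default_umc_late_init;

	/* generic RAS code now reaches everything through the embedded object */
	adev.umc_ras->ras_block.ras_late_init(&adev, NULL);
	if (adev.umc_ras->ras_block.hw_ops &&
	    adev.umc_ras->ras_block.hw_ops->query_ras_error_count)
		adev.umc_ras->ras_block.hw_ops->query_ras_error_count(&adev, NULL);
	return 0;
}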