@@ -149,7 +149,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
 {
 	if (amdgpu_compute_multipipe != -1) {
-		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+		dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
 		return amdgpu_compute_multipipe == 1;
 	}
@@ -674,7 +674,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
-			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+			dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
			break;
		}

@@ -683,15 +683,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)

	amdgpu_device_flush_hdp(adev, NULL);

-	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
-		 kiq_ring->queue);
+	dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
+		 kiq_ring->pipe, kiq_ring->queue);

	spin_lock(&kiq->ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
	if (r) {
-		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+		dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
		spin_unlock(&kiq->ring_lock);
		return r;
	}
@@ -712,7 +712,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
-		DRM_ERROR("KCQ enable failed\n");
+		dev_err(adev->dev, "KCQ enable failed\n");

	return r;
 }
@@ -734,7 +734,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
			r = amdgpu_mes_map_legacy_queue(adev,
							&adev->gfx.gfx_ring[j]);
			if (r) {
-				DRM_ERROR("failed to map gfx queue\n");
+				dev_err(adev->dev, "failed to map gfx queue\n");
				return r;
			}
		}
@@ -748,7 +748,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_gfx_rings);
	if (r) {
-		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+		dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
		spin_unlock(&kiq->ring_lock);
		return r;
	}
@@ -769,7 +769,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
-		DRM_ERROR("KGQ enable failed\n");
+		dev_err(adev->dev, "KGQ enable failed\n");

	return r;
 }
@@ -1030,7 +1030,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,

	ih_data.head = *ras_if;

-	DRM_ERROR("CP ECC ERROR IRQ\n");
+	dev_err(adev->dev, "CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
 }
0 commit comments