@@ -44,6 +44,45 @@ u32 amdgpu_userqueue_get_supported_ip_mask(struct amdgpu_device *adev)
 	return userq_ip_mask;
 }
 
+static int
+amdgpu_userqueue_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+			      struct amdgpu_usermode_queue *queue)
+{
+	struct amdgpu_device *adev = uq_mgr->adev;
+	const struct amdgpu_userq_funcs *userq_funcs =
+		adev->userq_funcs[queue->queue_type];
+	int r = 0;
+
+	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+		r = userq_funcs->unmap(uq_mgr, queue);
+		if (r)
+			queue->state = AMDGPU_USERQ_STATE_HUNG;
+		else
+			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+	}
+	return r;
+}
+
+static int
+amdgpu_userqueue_map_helper(struct amdgpu_userq_mgr *uq_mgr,
+			    struct amdgpu_usermode_queue *queue)
+{
+	struct amdgpu_device *adev = uq_mgr->adev;
+	const struct amdgpu_userq_funcs *userq_funcs =
+		adev->userq_funcs[queue->queue_type];
+	int r = 0;
+
+	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+		r = userq_funcs->map(uq_mgr, queue);
+		if (r) {
+			queue->state = AMDGPU_USERQ_STATE_HUNG;
+		} else {
+			queue->state = AMDGPU_USERQ_STATE_MAPPED;
+		}
+	}
+	return r;
+}
+
 static void
 amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
 			 struct amdgpu_usermode_queue *queue,
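The two helpers introduced above centralize the map/unmap state machine: each one gates the hardware call on the queue's current state and folds the result back into queue->state, so a queue that fails either transition is parked in AMDGPU_USERQ_STATE_HUNG and later transitions become no-ops. A minimal sketch of the calling pattern this enables (illustrative only, not part of the patch):

	int r;

	r = amdgpu_userqueue_unmap_helper(uq_mgr, queue);
	if (!r) {
		/* queue is now AMDGPU_USERQ_STATE_UNMAPPED; safe to
		 * touch its backing resources here */
		r = amdgpu_userqueue_map_helper(uq_mgr, queue);
	}
	/* on failure the queue is left AMDGPU_USERQ_STATE_HUNG and
	 * both helpers will skip it from now on */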
@@ -79,7 +118,7 @@ amdgpu_userqueue_active(struct amdgpu_userq_mgr *uq_mgr)
 	mutex_lock(&uq_mgr->userq_mutex);
 	/* Resume all the queues for this process */
 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
-		ret += queue->queue_active;
+		ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
 
 	mutex_unlock(&uq_mgr->userq_mutex);
 	return ret;
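Counting active queues now reduces to summing the boolean comparison against AMDGPU_USERQ_STATE_MAPPED, which evaluates to 0 or 1 per queue, replacing the old per-queue queue_active flag. The enum itself is defined outside this diff; inferred purely from the constants used in the patch, it presumably looks something like:

	/* Presumed shape of the state enum (not shown in this diff);
	 * the names come from the patch, values and order are
	 * assumptions. */
	enum amdgpu_userq_state {
		AMDGPU_USERQ_STATE_UNMAPPED = 0,
		AMDGPU_USERQ_STATE_MAPPED,
		AMDGPU_USERQ_STATE_HUNG,
	};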
@@ -254,9 +293,8 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id)
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
 	struct amdgpu_device *adev = uq_mgr->adev;
-	const struct amdgpu_userq_funcs *uq_funcs;
 	struct amdgpu_usermode_queue *queue;
-	int r;
+	int r = 0;
 
 	cancel_delayed_work(&uq_mgr->resume_work);
 	mutex_lock(&uq_mgr->userq_mutex);
@@ -267,8 +305,7 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id)
 		mutex_unlock(&uq_mgr->userq_mutex);
 		return -EINVAL;
 	}
-	uq_funcs = adev->userq_funcs[queue->queue_type];
-	r = uq_funcs->unmap(uq_mgr, queue);
+	r = amdgpu_userqueue_unmap_helper(uq_mgr, queue);
 	amdgpu_bo_unpin(queue->db_obj.obj);
 	amdgpu_bo_unref(&queue->db_obj.obj);
 	amdgpu_userqueue_cleanup(uq_mgr, queue, queue_id);
@@ -414,7 +451,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	else
 		skip_map_queue = false;
 	if (!skip_map_queue) {
-		r = uq_funcs->map(uq_mgr, queue);
+		r = amdgpu_userqueue_map_helper(uq_mgr, queue);
 		if (r) {
 			mutex_unlock(&adev->userq_mutex);
 			DRM_ERROR("Failed to map Queue\n");
@@ -489,19 +526,19 @@ static int
 amdgpu_userqueue_resume_all(struct amdgpu_userq_mgr *uq_mgr)
 {
 	struct amdgpu_device *adev = uq_mgr->adev;
-	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
 	int queue_id;
-	int ret = 0;
+	int ret = 0, r;
 
 	/* Resume all the queues for this process */
 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
-		userq_funcs = adev->userq_funcs[queue->queue_type];
-		ret |= userq_funcs->map(uq_mgr, queue);
+		r = amdgpu_userqueue_map_helper(uq_mgr, queue);
+		if (r)
+			ret = r;
 	}
 
 	if (ret)
-		DRM_ERROR("Failed to map all the queues\n");
+		dev_err(adev->dev, "Failed to map all the queues\n");
 	return ret;
 }
 
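The loop also changes how errors are aggregated: the old code OR-ed return values together, but errnos are small negative integers, and OR-ing two of them yields a value that is no valid errno at all. Recording the last failure keeps ret a genuine error code. An illustration of the old pattern's flaw:

	/* illustration only: why ret |= r was wrong for errnos */
	int ret = 0;
	ret |= -EIO;    /* ret == -5 */
	ret |= -ENOMEM; /* -5 | -12 == -1, i.e. -EPERM: a plausible-
	                 * looking but entirely wrong error code */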
@@ -647,19 +684,19 @@ static int
 amdgpu_userqueue_suspend_all(struct amdgpu_userq_mgr *uq_mgr)
 {
 	struct amdgpu_device *adev = uq_mgr->adev;
-	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
 	int queue_id;
-	int ret = 0;
+	int ret = 0, r;
 
 	/* Try to unmap all the queues in this process ctx */
 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
-		userq_funcs = adev->userq_funcs[queue->queue_type];
-		ret += userq_funcs->unmap(uq_mgr, queue);
+		r = amdgpu_userqueue_unmap_helper(uq_mgr, queue);
+		if (r)
+			ret = r;
 	}
 
 	if (ret)
-		DRM_ERROR("Couldn't unmap all the queues\n");
+		dev_err(adev->dev, "Couldn't unmap all the queues\n");
 	return ret;
 }
 
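The suspend side gets the same two fixes: its old ret += accumulation had the analogous flaw of summing negative errnos into an arbitrary total, and the switch from DRM_ERROR to dev_err(adev->dev, ...) ties each message to a specific device, which matters on multi-GPU systems where several devices log through the same kernel ring.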
@@ -760,11 +797,10 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
760797int amdgpu_userq_suspend (struct amdgpu_device * adev )
761798{
762799 u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask (adev );
763- const struct amdgpu_userq_funcs * userq_funcs ;
764800 struct amdgpu_usermode_queue * queue ;
765801 struct amdgpu_userq_mgr * uqm , * tmp ;
766802 int queue_id ;
767- int ret = 0 ;
803+ int ret = 0 , r ;
768804
769805 if (!ip_mask )
770806 return 0 ;
@@ -773,8 +809,9 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
 		cancel_delayed_work_sync(&uqm->resume_work);
 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
-			userq_funcs = adev->userq_funcs[queue->queue_type];
-			ret |= userq_funcs->unmap(uqm, queue);
+			r = amdgpu_userqueue_unmap_helper(uqm, queue);
+			if (r)
+				ret = r;
 		}
 	}
 	mutex_unlock(&adev->userq_mutex);
@@ -784,20 +821,20 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
 int amdgpu_userq_resume(struct amdgpu_device *adev)
 {
 	u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
-	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
 	struct amdgpu_userq_mgr *uqm, *tmp;
 	int queue_id;
-	int ret = 0;
+	int ret = 0, r;
 
 	if (!ip_mask)
 		return 0;
 
 	mutex_lock(&adev->userq_mutex);
 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
-			userq_funcs = adev->userq_funcs[queue->queue_type];
-			ret |= userq_funcs->map(uqm, queue);
+			r = amdgpu_userqueue_map_helper(uqm, queue);
+			if (r)
+				ret = r;
 		}
 	}
 	mutex_unlock(&adev->userq_mutex);
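Routing the device-wide suspend/resume paths through the helpers also has a useful behavioral consequence: a queue whose unmap failed during amdgpu_userq_suspend() is left in AMDGPU_USERQ_STATE_HUNG, so the state check in amdgpu_userqueue_map_helper() skips it on resume rather than remapping a queue whose state the hardware may no longer agree about.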
@@ -808,11 +845,10 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 						  u32 idx)
 {
 	u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
-	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
 	struct amdgpu_userq_mgr *uqm, *tmp;
 	int queue_id;
-	int ret = 0;
+	int ret = 0, r;
 
 	/* only need to stop gfx/compute */
 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
@@ -828,8 +864,9 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
 			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
 			    (queue->xcp_id == idx)) {
-				userq_funcs = adev->userq_funcs[queue->queue_type];
-				ret |= userq_funcs->unmap(uqm, queue);
+				r = amdgpu_userqueue_unmap_helper(uqm, queue);
+				if (r)
+					ret = r;
 			}
 		}
 	}
@@ -841,11 +878,10 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
 						   u32 idx)
 {
 	u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
-	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
 	struct amdgpu_userq_mgr *uqm, *tmp;
 	int queue_id;
-	int ret = 0;
+	int ret = 0, r;
 
 	/* only need to stop gfx/compute */
 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
@@ -860,8 +896,9 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
 			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
 			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
 			    (queue->xcp_id == idx)) {
-				userq_funcs = adev->userq_funcs[queue->queue_type];
-				ret |= userq_funcs->map(uqm, queue);
+				r = amdgpu_userqueue_map_helper(uqm, queue);
+				if (r)
+					ret = r;
 			}
 		}
 	}