2424MODULE_IMPORT_NS ("DMA_BUF" );
2525
/*
 * amdxdna_gem_heap_alloc() - carve a device-BO allocation out of the client's
 * device heap.
 *
 * Validates the heap under client->mm_lock (heap must exist, its userptr must
 * be valid, and the requested size must be non-zero and fit within the heap),
 * then reserves mem->size bytes in the heap's drm_mm range allocator. On
 * success, the BO's CPU views (userptr, kva) are derived from the heap's
 * mappings at the allocated offset, and a reference on the heap GEM object is
 * taken so the heap outlives this BO (released in amdxdna_gem_heap_free()).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_client *client = abo->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_mem *mem = &abo->mem;
	struct amdxdna_gem_obj *heap;
	u64 offset;
	u32 align;
	int ret;

	mutex_lock(&client->mm_lock);

	heap = client->dev_heap;
	if (!heap) {
		ret = -EINVAL;
		goto unlock_out;
	}

	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
		XDNA_ERR(xdna, "Invalid dev heap userptr");
		ret = -EINVAL;
		goto unlock_out;
	}

	if (mem->size == 0 || mem->size > heap->mem.size) {
		XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
			 mem->size, heap->mem.size);
		ret = -EINVAL;
		goto unlock_out;
	}

	/*
	 * Alignment is at least one page, or larger when the device requires
	 * it (dev_mem_buf_shift).
	 */
	align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
	ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
					 mem->size, align,
					 0, DRM_MM_INSERT_BEST);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		goto unlock_out;
	}

	mem->dev_addr = abo->mm_node.start;
	/* CPU-side views mirror the heap's mappings at the same offset. */
	offset = mem->dev_addr - heap->mem.dev_addr;
	mem->userptr = heap->mem.userptr + offset;
	mem->kva = heap->mem.kva + offset;

	/* Pin the heap object for the lifetime of this dev BO. */
	drm_gem_object_get(to_gobj(heap));

unlock_out:
	mutex_unlock(&client->mm_lock);

	return ret;
}
79+
/*
 * amdxdna_gem_destroy_obj() - final teardown of an amdxdna GEM object.
 *
 * Destroys the per-BO lock and frees the object itself. Callers must have
 * already released every other resource held by @abo.
 */
static void
amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
{
	mutex_destroy(&abo->lock);
	kfree(abo);
}
86+
/*
 * amdxdna_gem_heap_free() - return a dev BO's range to the device heap.
 *
 * Removes the BO's node from the heap's drm_mm allocator under
 * client->mm_lock and drops the heap reference taken by
 * amdxdna_gem_heap_alloc().
 */
static void
amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_gem_obj *heap;

	mutex_lock(&abo->client->mm_lock);

	drm_mm_remove_node(&abo->mm_node);

	heap = abo->client->dev_heap;
	drm_gem_object_put(to_gobj(heap));

	mutex_unlock(&abo->client->mm_lock);
}
62101
63102static bool amdxdna_hmm_invalidate (struct mmu_interval_notifier * mni ,
@@ -213,6 +252,20 @@ static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
213252 return ret ;
214253}
215254
/*
 * amdxdna_gem_dev_obj_free() - GEM .free callback for AMDXDNA_BO_DEV objects.
 *
 * Unpins the BO if needed, gives its range back to the device heap (which
 * also drops the heap reference), releases the GEM core state, and frees
 * the object.
 */
static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);

	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
	if (abo->pinned)
		amdxdna_gem_unpin(abo);

	amdxdna_gem_heap_free(abo);
	drm_gem_object_release(gobj);
	amdxdna_gem_destroy_obj(abo);
}
268+
216269static int amdxdna_insert_pages (struct amdxdna_gem_obj * abo ,
217270 struct vm_area_struct * vma )
218271{
@@ -374,19 +427,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
374427 if (abo -> pinned )
375428 amdxdna_gem_unpin (abo );
376429
377- if (abo -> type == AMDXDNA_BO_DEV ) {
378- mutex_lock (& abo -> client -> mm_lock );
379- drm_mm_remove_node (& abo -> mm_node );
380- mutex_unlock (& abo -> client -> mm_lock );
381-
382- vunmap (abo -> mem .kva );
383- drm_gem_object_put (to_gobj (abo -> dev_heap ));
384- drm_gem_object_release (gobj );
385- mutex_destroy (& abo -> lock );
386- kfree (abo );
387- return ;
388- }
389-
390430 if (abo -> type == AMDXDNA_BO_DEV_HEAP )
391431 drm_mm_takedown (& abo -> mm );
392432
@@ -402,7 +442,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
402442}
403443
/* GEM object ops for AMDXDNA_BO_DEV objects (heap-backed, no own pages). */
static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
	.free = amdxdna_gem_dev_obj_free,
};
407447
408448static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -527,6 +567,7 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
527567 struct drm_file * filp )
528568{
529569 struct amdxdna_client * client = filp -> driver_priv ;
570+ struct iosys_map map = IOSYS_MAP_INIT_VADDR (NULL );
530571 struct amdxdna_dev * xdna = to_xdna_dev (dev );
531572 struct drm_gem_shmem_object * shmem ;
532573 struct amdxdna_gem_obj * abo ;
@@ -553,18 +594,26 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
553594
554595 shmem -> map_wc = false;
555596 abo = to_xdna_obj (& shmem -> base );
556-
557597 abo -> type = AMDXDNA_BO_DEV_HEAP ;
558598 abo -> client = client ;
559599 abo -> mem .dev_addr = client -> xdna -> dev_info -> dev_mem_base ;
560600 drm_mm_init (& abo -> mm , abo -> mem .dev_addr , abo -> mem .size );
561601
602+ ret = drm_gem_vmap (to_gobj (abo ), & map );
603+ if (ret ) {
604+ XDNA_ERR (xdna , "Vmap heap bo failed, ret %d" , ret );
605+ goto release_obj ;
606+ }
607+ abo -> mem .kva = map .vaddr ;
608+
562609 client -> dev_heap = abo ;
563610 drm_gem_object_get (to_gobj (abo ));
564611 mutex_unlock (& client -> mm_lock );
565612
566613 return abo ;
567614
615+ release_obj :
616+ drm_gem_object_put (to_gobj (abo ));
568617mm_unlock :
569618 mutex_unlock (& client -> mm_lock );
570619 return ERR_PTR (ret );
@@ -573,69 +622,43 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
/*
 * amdxdna_drm_alloc_dev_bo() - create an AMDXDNA_BO_DEV object backed by the
 * client's device heap.
 *
 * Allocates the driver object, reserves heap space for the page-aligned
 * request size via amdxdna_gem_heap_alloc() (which validates the heap and
 * takes a reference on it), and initializes the GEM core as a private
 * (pageless) object.
 *
 * Return: the new BO on success, ERR_PTR() on failure.
 */
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
			 struct amdxdna_drm_create_bo *args,
			 struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	size_t aligned_sz = PAGE_ALIGN(args->size);
	struct amdxdna_gem_obj *abo;
	int ret;

	abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
	if (IS_ERR(abo))
		return abo;

	to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
	abo->type = AMDXDNA_BO_DEV;
	abo->client = client;

	ret = amdxdna_gem_heap_alloc(abo);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		/*
		 * GEM core was never initialized for this object, so tear it
		 * down directly instead of going through the .free callback.
		 */
		amdxdna_gem_destroy_obj(abo);
		return ERR_PTR(ret);
	}

	drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);

	return abo;
}
629652
630653static struct amdxdna_gem_obj *
631654amdxdna_drm_create_cmd_bo (struct drm_device * dev ,
632655 struct amdxdna_drm_create_bo * args ,
633656 struct drm_file * filp )
634657{
658+ struct iosys_map map = IOSYS_MAP_INIT_VADDR (NULL );
635659 struct amdxdna_dev * xdna = to_xdna_dev (dev );
636660 struct drm_gem_shmem_object * shmem ;
637661 struct amdxdna_gem_obj * abo ;
638- struct iosys_map map ;
639662 int ret ;
640663
641664 if (args -> size > XDNA_MAX_CMD_BO_SIZE ) {
@@ -692,7 +715,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
692715 abo = amdxdna_drm_create_dev_heap (dev , args , filp );
693716 break ;
694717 case AMDXDNA_BO_DEV :
695- abo = amdxdna_drm_alloc_dev_bo (dev , args , filp , false );
718+ abo = amdxdna_drm_alloc_dev_bo (dev , args , filp );
696719 break ;
697720 case AMDXDNA_BO_CMD :
698721 abo = amdxdna_drm_create_cmd_bo (dev , args , filp );
@@ -724,20 +747,13 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
724747 struct amdxdna_dev * xdna = to_xdna_dev (to_gobj (abo )-> dev );
725748 int ret ;
726749
750+ if (abo -> type == AMDXDNA_BO_DEV )
751+ abo = abo -> client -> dev_heap ;
752+
727753 if (is_import_bo (abo ))
728754 return 0 ;
729755
730- switch (abo -> type ) {
731- case AMDXDNA_BO_SHMEM :
732- case AMDXDNA_BO_DEV_HEAP :
733- ret = drm_gem_shmem_pin (& abo -> base );
734- break ;
735- case AMDXDNA_BO_DEV :
736- ret = drm_gem_shmem_pin (& abo -> dev_heap -> base );
737- break ;
738- default :
739- ret = - EOPNOTSUPP ;
740- }
756+ ret = drm_gem_shmem_pin (& abo -> base );
741757
742758 XDNA_DBG (xdna , "BO type %d ret %d" , abo -> type , ret );
743759 return ret ;
@@ -747,9 +763,6 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
747763{
748764 int ret ;
749765
750- if (abo -> type == AMDXDNA_BO_DEV )
751- abo = abo -> dev_heap ;
752-
753766 mutex_lock (& abo -> lock );
754767 ret = amdxdna_gem_pin_nolock (abo );
755768 mutex_unlock (& abo -> lock );
@@ -759,12 +772,12 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
759772
760773void amdxdna_gem_unpin (struct amdxdna_gem_obj * abo )
761774{
775+ if (abo -> type == AMDXDNA_BO_DEV )
776+ abo = abo -> client -> dev_heap ;
777+
762778 if (is_import_bo (abo ))
763779 return ;
764780
765- if (abo -> type == AMDXDNA_BO_DEV )
766- abo = abo -> dev_heap ;
767-
768781 mutex_lock (& abo -> lock );
769782 drm_gem_shmem_unpin (& abo -> base );
770783 mutex_unlock (& abo -> lock );
@@ -855,10 +868,12 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
855868
856869 if (is_import_bo (abo ))
857870 drm_clflush_sg (abo -> base .sgt );
858- else if (abo -> type == AMDXDNA_BO_DEV )
859- drm_clflush_pages (abo -> mem .pages , abo -> mem . nr_pages );
860- else
871+ else if (abo -> mem . kva )
872+ drm_clflush_virt_range (abo -> mem .kva + args -> offset , args -> size );
873+ else if ( abo -> base . pages )
861874 drm_clflush_pages (abo -> base .pages , gobj -> size >> PAGE_SHIFT );
875+ else
876+ drm_WARN (& xdna -> ddev , 1 , "Can not get flush memory" );
862877
863878 amdxdna_gem_unpin (abo );
864879
0 commit comments