@@ -51,14 +51,16 @@ struct tegra_smmu {
 	struct iommu_device iommu;	/* IOMMU Core code handle */
 };
 
+struct tegra_pd;
+
 struct tegra_smmu_as {
 	struct iommu_domain domain;
 	struct tegra_smmu *smmu;
 	unsigned int use_count;
 	spinlock_t lock;
 	u32 *count;
 	struct page **pts;
-	struct page *pd;
+	struct tegra_pd *pd;
 	dma_addr_t pd_dma;
 	unsigned id;
 	u32 attr;
@@ -155,6 +157,10 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
 				 SMMU_PDE_NONSECURE)
 
+struct tegra_pd {
+	u32 val[SMMU_NUM_PDE];
+};
+
 static unsigned int iova_pd_index(unsigned long iova)
 {
 	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
@@ -284,23 +290,23 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 
 	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
 
-	as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
+	as->pd = iommu_alloc_page(GFP_KERNEL | __GFP_DMA);
 	if (!as->pd) {
 		kfree(as);
 		return NULL;
 	}
 
 	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
 	if (!as->count) {
-		__iommu_free_pages(as->pd, 0);
+		iommu_free_page(as->pd);
 		kfree(as);
 		return NULL;
 	}
 
 	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
 	if (!as->pts) {
 		kfree(as->count);
-		__iommu_free_pages(as->pd, 0);
+		iommu_free_page(as->pd);
 		kfree(as);
 		return NULL;
 	}
@@ -417,8 +423,8 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
 		goto unlock;
 	}
 
-	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
-				  DMA_TO_DEVICE);
+	as->pd_dma =
+		dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
 	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
 		err = -ENOMEM;
 		goto unlock;
@@ -450,7 +456,7 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
 	return 0;
 
 err_unmap:
-	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
 unlock:
 	mutex_unlock(&smmu->lock);
 
@@ -469,7 +475,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
 
 	tegra_smmu_free_asid(smmu, as->id);
 
-	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
 
 	as->smmu = NULL;
 
@@ -548,11 +554,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
 {
 	unsigned int pd_index = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
-	u32 *pd = page_address(as->pd);
+	struct tegra_pd *pd = as->pd;
 	unsigned long offset = pd_index * sizeof(*pd);
 
 	/* Set the page directory entry first */
-	pd[pd_index] = value;
+	pd->val[pd_index] = value;
 
 	/* The flush the page directory entry from caches */
 	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
@@ -577,14 +583,12 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
 	unsigned int pd_index = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
 	struct page *pt_page;
-	u32 *pd;
 
 	pt_page = as->pts[pd_index];
 	if (!pt_page)
 		return NULL;
 
-	pd = page_address(as->pd);
-	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
+	*dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);
 
 	return tegra_smmu_pte_offset(pt_page, iova);
 }
@@ -619,9 +623,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 
 		*dmap = dma;
 	} else {
-		u32 *pd = page_address(as->pd);
-
-		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
+		*dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
 	}
 
 	return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -645,8 +647,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 	 */
 	if (--as->count[pde] == 0) {
 		struct tegra_smmu *smmu = as->smmu;
-		u32 *pd = page_address(as->pd);
-		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
+		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);
 
 		tegra_smmu_set_pde(as, iova, 0);
 
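
The diff above replaces the untyped `struct page *pd` handle with a typed `struct tegra_pd` whose entries are reached as `pd->val[i]`, so the page directory is addressed through its kernel virtual address rather than via `page_address()`. The following is a minimal, userspace-only sketch of that access pattern under stated assumptions: `struct tegra_pd` and `SMMU_NUM_PDE` mirror the patch, while the allocation calls are plain stand-ins for `iommu_alloc_page()`/`iommu_free_page()` and none of the hardware programming is represented.

/* Hypothetical illustration only; not the driver code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SMMU_NUM_PDE 1024		/* 1024 * 4-byte PDEs = one 4 KiB page directory */

struct tegra_pd {
	uint32_t val[SMMU_NUM_PDE];	/* typed PDE array instead of a bare struct page */
};

int main(void)
{
	/* stand-in for iommu_alloc_page(): one zeroed, page-directory-sized buffer */
	struct tegra_pd *pd = calloc(1, sizeof(*pd));

	if (!pd)
		return 1;

	/* tegra_smmu_set_pde()-style store through the typed handle */
	pd->val[5] = 0xdeadbeef;
	printf("pde[5] = %#x\n", pd->val[5]);

	free(pd);			/* stand-in for iommu_free_page() */
	return 0;
}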