@@ -1138,6 +1138,43 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                         arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
+static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+                size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+        struct iommu_domain *domain = iommu_get_dma_domain(dev);
+        struct iova_domain *iovad = &domain->iova_cookie->iovad;
+
+        if (!is_swiotlb_active(dev)) {
+                dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+                return (phys_addr_t)DMA_MAPPING_ERROR;
+        }
+
+        trace_swiotlb_bounced(dev, phys, size);
+
+        phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
+                        attrs);
+
+        /*
+         * Untrusted devices should not see padding areas with random leftover
+         * kernel data, so zero the pre- and post-padding.
+         * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
+         * the contents of the original memory buffer.
+         */
+        if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
+                size_t start, virt = (size_t)phys_to_virt(phys);
+
+                /* Pre-padding */
+                start = iova_align_down(iovad, virt);
+                memset((void *)start, 0, virt - start);
+
+                /* Post-padding */
+                start = virt + size;
+                memset((void *)start, 0, iova_align(iovad, start) - start);
+        }
+
+        return phys;
+}
+
 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir,
                 unsigned long attrs)
@@ -1151,42 +1188,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
         dma_addr_t iova, dma_mask = dma_get_mask(dev);
 
         /*
-         * If both the physical buffer start address and size are
-         * page aligned, we don't need to use a bounce page.
+         * If both the physical buffer start address and size are page aligned,
+         * we don't need to use a bounce page.
          */
         if (dev_use_swiotlb(dev, size, dir) &&
             iova_offset(iovad, phys | size)) {
-                if (!is_swiotlb_active(dev)) {
-                        dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
-                        return DMA_MAPPING_ERROR;
-                }
-
-                trace_swiotlb_bounced(dev, phys, size);
-
-                phys = swiotlb_tbl_map_single(dev, phys, size,
-                                iova_mask(iovad), dir, attrs);
-
-                if (phys == DMA_MAPPING_ERROR)
+                phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
+                if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
                         return DMA_MAPPING_ERROR;
-
-                /*
-                 * Untrusted devices should not see padding areas with random
-                 * leftover kernel data, so zero the pre- and post-padding.
-                 * swiotlb_tbl_map_single() has initialized the bounce buffer
-                 * proper to the contents of the original memory buffer.
-                 */
-                if (dev_is_untrusted(dev)) {
-                        size_t start, virt = (size_t)phys_to_virt(phys);
-
-                        /* Pre-padding */
-                        start = iova_align_down(iovad, virt);
-                        memset((void *)start, 0, virt - start);
-
-                        /* Post-padding */
-                        start = virt + size;
-                        memset((void *)start, 0,
-                               iova_align(iovad, start) - start);
-                }
         }
 
         if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
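
Note on the padding zeroing in the new helper: for an untrusted device the bounce slot spans whole IOVA granules, so the slack between the granule boundaries and the actual payload has to be cleared. Below is a minimal standalone userspace sketch of that arithmetic (not part of the patch; align_down()/align_up() and the fixed 4 KiB GRANULE are stand-ins for iova_align_down()/iova_align() and the real IOVA granule):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GRANULE 4096UL	/* assumed IOVA granule: one 4 KiB page */

static uintptr_t align_down(uintptr_t a) { return a & ~(GRANULE - 1); }
static uintptr_t align_up(uintptr_t a) { return (a + GRANULE - 1) & ~(GRANULE - 1); }

int main(void)
{
	/* Granule-aligned buffer standing in for the swiotlb bounce slot. */
	char *slot = aligned_alloc(GRANULE, 2 * GRANULE);
	size_t offset = 0x200, size = 0x300;	/* deliberately unaligned payload */
	uintptr_t virt, start;

	if (!slot)
		return 1;
	virt = (uintptr_t)slot + offset;
	memset(slot, 0xaa, 2 * GRANULE);	/* pretend leftover data */
	memcpy((void *)virt, "payload", 7);	/* contents the bounce copy filled in */

	/* Pre-padding: from the granule boundary below the payload to its start. */
	start = align_down(virt);
	memset((void *)start, 0, virt - start);

	/* Post-padding: from the end of the payload to the next granule boundary. */
	start = virt + size;
	memset((void *)start, 0, align_up(start) - start);

	/* Since slot is granule-aligned, the pre-padding starts at offset 0. */
	printf("zeroed [0x0, 0x%zx) and [0x%zx, 0x%zx) within the slot\n",
	       offset, offset + size, (size_t)(align_up(start) - (uintptr_t)slot));
	free(slot);
	return 0;
}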
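
Similarly, the caller bounces only when dev_use_swiotlb() is true and iova_offset(iovad, phys | size) is non-zero, i.e. when either the physical start address or the length is misaligned with respect to the IOVA granule; OR-ing the two values lets a single mask test cover both. A small sketch of that check under the same assumed 4 KiB granule (is_unaligned() is a hypothetical stand-in for the iova_offset() test):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define GRANULE_MASK (4096UL - 1)	/* assumed IOVA granule of 4 KiB */

/*
 * True iff the buffer needs bouncing: either its physical start address
 * or its length (or both) is not granule-aligned.
 */
static bool is_unaligned(uint64_t phys, size_t size)
{
	return ((phys | size) & GRANULE_MASK) != 0;
}

int main(void)
{
	bool a = is_unaligned(0x100000, 0x2000);	/* false: both 4 KiB aligned */
	bool b = is_unaligned(0x100000, 0x1200);	/* true: length is unaligned */

	return (!a && b) ? 0 : 1;
}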