@@ -907,11 +907,12 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 }
 
 /**
- * sba_map_single_attrs - map one buffer and return IOVA for DMA
+ * sba_map_page - map one buffer and return IOVA for DMA
  * @dev: instance of PCI owned by the driver that's asking.
- * @addr: driver buffer to map.
- * @size: number of bytes to map in driver buffer.
- * @dir: R/W or both.
+ * @page: page to map
+ * @poff: offset into page
+ * @size: number of bytes to map
+ * @dir: dma direction
  * @attrs: optional dma attributes
  *
  * See Documentation/DMA-API-HOWTO.txt
@@ -944,7 +945,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
                ** Device is bit capable of DMA'ing to the buffer...
                ** just return the PCI address of ptr
                */
-               DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+               DBG_BYPASS("sba_map_page() bypass mask/addr: "
                           "0x%lx/0x%lx\n",
                           to_pci_dev(dev)->dma_mask, pci_addr);
                return pci_addr;
@@ -966,7 +967,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
+       if (sba_check_pdir(ioc,"Check before sba_map_page()"))
                panic("Sanity check failed");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -997,20 +998,12 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
        /* form complete address */
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
+       sba_check_pdir(ioc,"Check after sba_map_page()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
        return SBA_IOVA(ioc, iovp, offset);
 }
 
-static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
-                                      size_t size, enum dma_data_direction dir,
-                                      unsigned long attrs)
-{
-       return sba_map_page(dev, virt_to_page(addr),
-                           (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
-}
-
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
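
With the sba_map_single_attrs() wrapper removed above, a caller that still holds a kernel virtual address has to do the page/offset split itself. Below is a minimal sketch of that conversion, assuming this file's context; map_vaddr_example is a hypothetical name, while virt_to_page() and offset_in_page() are the stock kernel helpers the deleted wrapper effectively open-coded:

/* Hypothetical example, not part of the patch. */
static dma_addr_t map_vaddr_example(struct device *dev, void *addr,
                                    size_t size, enum dma_data_direction dir)
{
        struct page *page = virt_to_page(addr);        /* backing page */
        unsigned long poff = offset_in_page(addr);     /* addr & ~PAGE_MASK */

        return sba_map_page(dev, page, poff, size, dir, 0);
}
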
@@ -1036,7 +1029,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
 #endif
 
 /**
- * sba_unmap_single_attrs - unmap one IOVA and free resources
+ * sba_unmap_page - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova: IOVA of driver buffer previously mapped.
  * @size: number of bytes mapped in driver buffer.
@@ -1063,7 +1056,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
                /*
                ** Address does not fall w/in IOVA, must be bypassing
                */
-               DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
+               DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
                           iova);
 
 #ifdef ENABLE_MARK_CLEAN
@@ -1114,12 +1107,6 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
 
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-                           enum dma_data_direction dir, unsigned long attrs)
-{
-       sba_unmap_page(dev, iova, size, dir, attrs);
-}
-
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
  * @dev: instance of PCI owned by the driver that's asking.
@@ -1132,30 +1119,24 @@ static void *
 sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t flags, unsigned long attrs)
 {
+       struct page *page;
        struct ioc *ioc;
+       int node = -1;
        void *addr;
 
        ioc = GET_IOC(dev);
        ASSERT(ioc);
-
 #ifdef CONFIG_NUMA
-       {
-               struct page *page;
-
-               page = alloc_pages_node(ioc->node, flags, get_order(size));
-               if (unlikely(!page))
-                       return NULL;
-
-               addr = page_address(page);
-       }
-#else
-       addr = (void *) __get_free_pages(flags, get_order(size));
+       node = ioc->node;
 #endif
-       if (unlikely(!addr))
+
+       page = alloc_pages_node(node, flags, get_order(size));
+       if (unlikely(!page))
                return NULL;
 
+       addr = page_address(page);
        memset(addr, 0, size);
-       *dma_handle = virt_to_phys(addr);
+       *dma_handle = page_to_phys(page);
 
 #ifdef ALLOW_IOV_BYPASS
        ASSERT(dev->coherent_dma_mask);
@@ -1174,9 +1155,10 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
         * If device can't bypass or bypass is disabled, pass the 32bit fake
         * device to map single to get an iova mapping.
         */
-       *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
-                                          size, 0, 0);
-
+       *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
+                                  DMA_BIDIRECTIONAL, 0);
+       if (dma_mapping_error(dev, *dma_handle))
+               return NULL;
        return addr;
 }
 
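
The rework above collapses the CONFIG_NUMA and non-NUMA allocation paths into a single alloc_pages_node() call: the -1 initializer is NUMA_NO_NODE, which alloc_pages_node() resolves to the current node, so the old __get_free_pages() branch becomes redundant. A minimal sketch of that pattern in isolation (alloc_zeroed_example is a hypothetical name):

/* Hypothetical example, not part of the patch. */
static void *alloc_zeroed_example(size_t size, gfp_t flags, int node)
{
        /* node == -1 (NUMA_NO_NODE) lets alloc_pages_node() pick the
         * current node, covering the !CONFIG_NUMA case as well. */
        struct page *page = alloc_pages_node(node, flags, get_order(size));

        if (unlikely(!page))
                return NULL;
        memset(page_address(page), 0, size);
        return page_address(page);
}
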
@@ -1193,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
                              dma_addr_t dma_handle, unsigned long attrs)
 {
-       sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
+       sba_unmap_page(dev, dma_handle, size, 0, 0);
        free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -1483,7 +1465,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sglist->dma_length = sglist->length;
-               sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
+               sglist->dma_address = sba_map_page(dev, sg_page(sglist),
+                               sglist->offset, sglist->length, dir, attrs);
+               if (dma_mapping_error(dev, sglist->dma_address))
+                       return 0;
                return 1;
        }
 
@@ -1572,8 +1557,8 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 
        while (nents && sglist->dma_length) {
 
-               sba_unmap_single_attrs(dev, sglist->dma_address,
-                                      sglist->dma_length, dir, attrs);
+               sba_unmap_page(dev, sglist->dma_address, sglist->dma_length,
+                              dir, attrs);
                sglist = sg_next(sglist);
                nents--;
        }
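
For context, drivers never call sba_map_page() directly; requests arrive through the generic DMA API, which dispatches to these routines via the ia64 dma_map_ops. The dma_mapping_error() checks this patch adds are what the DMA API contract already requires of external callers; a hedged usage sketch of such a caller (driver_map_example is a hypothetical name):

/* Hypothetical example, not part of the patch. */
#include <linux/dma-mapping.h>

static int driver_map_example(struct device *dev, struct page *page,
                              size_t len, dma_addr_t *out)
{
        dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_BIDIRECTIONAL);

        /* A mapping failure must be checked here, per the DMA API. */
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;
        *out = dma;
        return 0;
}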