@@ -50,8 +50,6 @@ static unsigned long iommu_pages; /* .. and in pages */
 
 static u32 *iommu_gatt_base;		/* Remapping table */
 
-static dma_addr_t bad_dma_addr;
-
 /*
  * If this is disabled the IOMMU will use an optimized flushing strategy
  * of only flushing when an mapping is reused. With it true the GART is
@@ -74,8 +72,6 @@ static u32 gart_unmapped_entry;
 	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
 #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
 
-#define EMERGENCY_PAGES 32 /* = 128KB */
-
 #ifdef CONFIG_AGP
 #define AGPEXTERN extern
 #else
@@ -184,14 +180,6 @@ static void iommu_full(struct device *dev, size_t size, int dir)
 	 */
 
 	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
-
-	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
-		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Memory would be corrupted\n");
-		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic(KERN_ERR
-				"PCI-DMA: Random memory would be DMAed\n");
-	}
 #ifdef CONFIG_IOMMU_LEAK
 	dump_leak();
 #endif
@@ -220,7 +208,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 	int i;
 
 	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
-		return bad_dma_addr;
+		return DMA_MAPPING_ERROR;
 
 	iommu_page = alloc_iommu(dev, npages, align_mask);
 	if (iommu_page == -1) {
@@ -229,7 +217,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 		if (panic_on_overflow)
 			panic("dma_map_area overflow %lu bytes\n", size);
 		iommu_full(dev, size, dir);
-		return bad_dma_addr;
+		return DMA_MAPPING_ERROR;
 	}
 
 	for (i = 0; i < npages; i++) {
@@ -271,7 +259,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	int npages;
 	int i;
 
-	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
+	if (dma_addr == DMA_MAPPING_ERROR ||
 	    dma_addr >= iommu_bus_base + iommu_size)
 		return;
 
@@ -315,7 +303,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 
 		if (nonforced_iommu(dev, addr, s->length)) {
 			addr = dma_map_area(dev, addr, s->length, dir, 0);
-			if (addr == bad_dma_addr) {
+			if (addr == DMA_MAPPING_ERROR) {
 				if (i > 0)
 					gart_unmap_sg(dev, sg, i, dir, 0);
 				nents = 0;
@@ -471,7 +459,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 	iommu_full(dev, pages << PAGE_SHIFT, dir);
 	for_each_sg(sg, s, nents, i)
-		s->dma_address = bad_dma_addr;
+		s->dma_address = DMA_MAPPING_ERROR;
 	return 0;
 }
 
@@ -490,7 +478,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
 			DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
 	flush_gart();
-	if (unlikely(*dma_addr == bad_dma_addr))
+	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
 		goto out_free;
 	return vaddr;
 out_free:
@@ -507,11 +495,6 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
 }
 
-static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return (dma_addr == bad_dma_addr);
-}
-
 static int no_agp;
 
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -695,7 +678,6 @@ static const struct dma_map_ops gart_dma_ops = {
 	.unmap_page			= gart_unmap_page,
 	.alloc				= gart_alloc_coherent,
 	.free				= gart_free_coherent,
-	.mapping_error			= gart_mapping_error,
 	.dma_supported			= dma_direct_supported,
 };
 
@@ -730,7 +712,6 @@ int __init gart_iommu_init(void)
 	unsigned long aper_base, aper_size;
 	unsigned long start_pfn, end_pfn;
 	unsigned long scratch;
-	long i;
 
 	if (!amd_nb_has_feature(AMD_NB_GART))
 		return 0;
@@ -784,19 +765,12 @@ int __init gart_iommu_init(void)
 	}
 #endif
 
-	/*
-	 * Out of IOMMU space handling.
-	 * Reserve some invalid pages at the beginning of the GART.
-	 */
-	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
-
 	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
 		iommu_size >> 20);
 
 	agp_memory_reserved	= iommu_size;
 	iommu_start		= aper_size - iommu_size;
 	iommu_bus_base		= info.aper_base + iommu_start;
-	bad_dma_addr		= iommu_bus_base;
 	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
 
 	/*
@@ -838,8 +812,6 @@ int __init gart_iommu_init(void)
 	if (!scratch)
 		panic("Cannot allocate iommu scratch page");
 	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
-	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
-		iommu_gatt_base[i] = gart_unmapped_entry;
 
 	flush_gart();
 	dma_ops = &gart_dma_ops;
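With bad_dma_addr gone, the GART failure paths return the generic DMA_MAPPING_ERROR sentinel, so the per-implementation .mapping_error hook is no longer needed: callers detect a failed mapping through dma_mapping_error(), which, once the whole conversion series lands, reduces to comparing the returned cookie against the shared sentinel (~(dma_addr_t)0). A minimal caller-side sketch of that convention; the helper name my_map_one_page is hypothetical and the device is assumed to be already set up for DMA:

#include <linux/dma-mapping.h>

/*
 * Hypothetical caller: map one page for device DMA and check the result.
 * On x86 with the GART, a failure in dma_map_area() now yields
 * DMA_MAPPING_ERROR, which dma_mapping_error() recognizes generically.
 */
static int my_map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* mapping failed; no driver-private sentinel involved */

	/* ... hand 'addr' to the device, then tear down the mapping ... */
	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}

Centralizing the sentinel also means the core can check for failure with a single comparison instead of an indirect call into each dma_map_ops backend.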