Skip to content
Permalink
Browse files
sparc: return error code from iommu_tbl_range_alloc()
Convert to long return type and return an error code so that it can be
returned all the way up by the .map_sg() ops.

Signed-off-by: Martin Oliveira <martin.oliveira@eideticom.com>
  • Loading branch information
iomartin committed Jun 21, 2021
1 parent 97a6566 commit 03f23088d07a92e502728eb2234a6ed52ea7794a
Show file tree
Hide file tree
Showing 4 changed files with 25 additions and 22 deletions.
@@ -94,12 +94,12 @@ void iommu_tbl_pool_init(struct iommu_map_table *iommu,
p->end = num_entries;
}

unsigned long iommu_tbl_range_alloc(struct device *dev,
struct iommu_map_table *iommu,
unsigned long npages,
unsigned long *handle,
unsigned long mask,
unsigned int align_order)
long iommu_tbl_range_alloc(struct device *dev,
struct iommu_map_table *iommu,
unsigned long npages,
unsigned long *handle,
unsigned long mask,
unsigned int align_order)
{
unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
unsigned long n, end, start, limit, boundary_size;
@@ -157,14 +157,14 @@ static inline iopte_t *alloc_npages(struct device *dev,
struct iommu *iommu,
unsigned long npages)
{
unsigned long entry;
long entry;

entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
if (unlikely(entry < 0))
return NULL;

return iommu->page_table + entry;
return iommu->page_table + (unsigned long) entry;
}

static int iommu_alloc_ctx(struct iommu *iommu)
@@ -475,7 +475,8 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
unsigned long paddr, npages, out_entry = 0, slen;
long entry;
iopte_t *base;

slen = s->length;
@@ -491,18 +492,18 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
&handle, (unsigned long)(-1), 0);

/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
if (unlikely(entry < 0)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
goto iommu_map_failed;
}

base = iommu->page_table + entry;
base = iommu->page_table + (unsigned long) entry;

/* Convert entry to a dma_addr_t */
dma_addr = iommu->tbl.table_map_base +
(entry << IO_PAGE_SHIFT);
((unsigned long) entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);

/* Insert into HW table */
@@ -535,7 +536,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
out_entry = entry;
out_entry = (unsigned long) entry;
}

/* Calculate next page pointer for contiguous check */
@@ -2021,7 +2021,7 @@ static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,

entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
npages, NULL, (unsigned long)-1, 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
if (unlikely(entry < 0))
return NULL;

return iommu->page_table + entry;
@@ -219,7 +219,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);

if (unlikely(entry == IOMMU_ERROR_CODE))
if (unlikely(entry < 0))
goto range_alloc_fail;

*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
@@ -385,7 +385,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);

if (unlikely(entry == IOMMU_ERROR_CODE))
if (unlikely(entry < 0))
goto bad;

bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
@@ -520,7 +520,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
unsigned long paddr, npages, out_entry = 0, slen;
long entry;

slen = s->length;
/* Sanity check */
@@ -535,16 +536,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
&handle, (unsigned long)(-1), 0);

/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
if (unlikely(entry < 0)) {
pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
tbl, paddr, npages);
goto iommu_map_failed;
}

iommu_batch_new_entry(entry, mask);
iommu_batch_new_entry((unsigned long) entry, mask);

/* Convert entry to a dma_addr_t */
dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr = tbl->table_map_base +
((unsigned long) entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);

/* Insert into HW table */
@@ -578,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
out_entry = entry;
out_entry = (unsigned long) entry;
}

/* Calculate next page pointer for contiguous check */

0 comments on commit 03f2308

Please sign in to comment.