Commit 2c7ad99

Rob Clark authored and committed
drm/msm: Add mmu support for non-zero offset
Only needs to be supported for the iopgtable MMU; the other cases are either only used for kernel-managed mappings (where the offset is always zero) or devices which do not support sparse bindings.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661501/
1 parent 62a28e2 commit 2c7ad99

File tree: 6 files changed, +36 −14 lines
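
The core interface change is the extra "off" parameter on the map() callback in msm_mmu_funcs (see the msm_mmu.h hunk below). A minimal sketch of how a sparse-binding caller could use it; the helper bind_partial and its arguments are hypothetical, only the ->map() signature matches the patch:

/*
 * Hypothetical illustration (not part of this commit): map only the
 * window [bo_offset, bo_offset + map_len) of a BO's backing sg_table
 * at gpu_va, using the new off/len arguments.
 */
static int bind_partial(struct msm_mmu *mmu, uint64_t gpu_va,
                        struct sg_table *sgt, size_t bo_offset,
                        size_t map_len)
{
        return mmu->funcs->map(mmu, gpu_va, sgt, bo_offset, map_len,
                               IOMMU_READ | IOMMU_WRITE);
}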

drivers/gpu/drm/msm/adreno/a2xx_gpummu.c

Lines changed: 4 additions & 1 deletion
@@ -29,13 +29,16 @@ static void a2xx_gpummu_detach(struct msm_mmu *mmu)
 }
 
 static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
-                struct sg_table *sgt, size_t len, int prot)
+                struct sg_table *sgt, size_t off, size_t len,
+                int prot)
 {
         struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
         unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
         struct sg_dma_page_iter dma_iter;
         unsigned prot_bits = 0;
 
+        WARN_ON(off != 0);
+
         if (prot & IOMMU_WRITE)
                 prot_bits |= 1;
         if (prot & IOMMU_READ)

drivers/gpu/drm/msm/msm_gem.c

Lines changed: 2 additions & 2 deletions
@@ -453,7 +453,7 @@ static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
         vma = lookup_vma(obj, vm);
 
         if (!vma) {
-                vma = msm_gem_vma_new(vm, obj, range_start, range_end);
+                vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
         } else {
                 GEM_WARN_ON(vma->va.addr < range_start);
                 GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
@@ -491,7 +491,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
         if (IS_ERR(pages))
                 return PTR_ERR(pages);
 
-        return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
+        return msm_gem_vma_map(vma, prot, msm_obj->sgt);
 }
 
 void msm_gem_unpin_locked(struct drm_gem_object *obj)

drivers/gpu/drm/msm/msm_gem.h

Lines changed: 2 additions & 2 deletions
@@ -110,9 +110,9 @@ struct msm_gem_vma {
 
 struct drm_gpuva *
 msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj,
-                u64 range_start, u64 range_end);
+                u64 offset, u64 range_start, u64 range_end);
 void msm_gem_vma_purge(struct drm_gpuva *vma);
-int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt, int size);
+int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt);
 void msm_gem_vma_close(struct drm_gpuva *vma);
 
 struct msm_gem_object {

drivers/gpu/drm/msm/msm_gem_vma.c

Lines changed: 7 additions & 6 deletions
@@ -38,8 +38,7 @@ void msm_gem_vma_purge(struct drm_gpuva *vma)
 
 /* Map and pin vma: */
 int
-msm_gem_vma_map(struct drm_gpuva *vma, int prot,
-                struct sg_table *sgt, int size)
+msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
 {
         struct msm_gem_vma *msm_vma = to_msm_vma(vma);
         struct msm_gem_vm *vm = to_msm_vm(vma->vm);
@@ -62,8 +61,9 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot,
          * Revisit this if we can come up with a scheme to pre-alloc pages
          * for the pgtable in map/unmap ops.
          */
-        ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt, size, prot);
-
+        ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt,
+                                  vma->gem.offset, vma->va.range,
+                                  prot);
         if (ret) {
                 msm_vma->mapped = false;
         }
@@ -93,7 +93,7 @@ void msm_gem_vma_close(struct drm_gpuva *vma)
 /* Create a new vma and allocate an iova for it */
 struct drm_gpuva *
 msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
-                u64 range_start, u64 range_end)
+                u64 offset, u64 range_start, u64 range_end)
 {
         struct msm_gem_vm *vm = to_msm_vm(gpuvm);
         struct drm_gpuvm_bo *vm_bo;
@@ -107,6 +107,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
                 return ERR_PTR(-ENOMEM);
 
         if (vm->managed) {
+                BUG_ON(offset != 0);
                 ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
                                 obj->size, PAGE_SIZE, 0,
                                 range_start, range_end, 0);
@@ -120,7 +121,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
 
         GEM_WARN_ON((range_end - range_start) > obj->size);
 
-        drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, 0);
+        drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
         vma->mapped = false;
 
         ret = drm_gpuva_insert(&vm->base, &vma->base);
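
With these changes msm_gem_vma_map() no longer takes an explicit size: the BO offset passed to msm_gem_vma_new() is stored by drm_gpuva_init() in vma->gem.offset, the VA size in vma->va.range, and both are forwarded to the mmu ->map() op. A minimal sketch of a caller, assuming only the updated helpers above; the wrapper map_bo_at is hypothetical:

/* Create a VMA that maps the BO starting at "offset", then pin it. */
static struct drm_gpuva *map_bo_at(struct drm_gpuvm *vm,
                                   struct drm_gem_object *obj,
                                   struct sg_table *sgt,
                                   u64 offset, u64 start, u64 end, int prot)
{
        struct drm_gpuva *vma;
        int ret;

        vma = msm_gem_vma_new(vm, obj, offset, start, end);
        if (IS_ERR(vma))
                return vma;

        /* size and offset now come from the gpuva itself */
        ret = msm_gem_vma_map(vma, prot, sgt);
        if (ret)
                return ERR_PTR(ret);

        return vma;
}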

drivers/gpu/drm/msm/msm_iommu.c

Lines changed: 20 additions & 2 deletions
@@ -113,7 +113,8 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
 }
 
 static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
-                struct sg_table *sgt, size_t len, int prot)
+                struct sg_table *sgt, size_t off, size_t len,
+                int prot)
 {
         struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
         struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
@@ -125,6 +126,19 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
                 size_t size = sg->length;
                 phys_addr_t phys = sg_phys(sg);
 
+                if (!len)
+                        break;
+
+                if (size <= off) {
+                        off -= size;
+                        continue;
+                }
+
+                phys += off;
+                size -= off;
+                size = min_t(size_t, size, len);
+                off = 0;
+
                 while (size) {
                         size_t pgsize, count, mapped = 0;
                         int ret;
@@ -140,6 +154,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
                         phys += mapped;
                         addr += mapped;
                         size -= mapped;
+                        len -= mapped;
 
                         if (ret) {
                                 msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
@@ -388,11 +403,14 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
 }
 
 static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
-                struct sg_table *sgt, size_t len, int prot)
+                struct sg_table *sgt, size_t off, size_t len,
+                int prot)
 {
         struct msm_iommu *iommu = to_msm_iommu(mmu);
         size_t ret;
 
+        WARN_ON(off != 0);
+
         /* The arm-smmu driver expects the addresses to be sign extended */
         if (iova & BIT_ULL(48))
                 iova |= GENMASK_ULL(63, 49);
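
The new logic in msm_iommu_pagetable_map() skips whole scatterlist entries until "off" is consumed, trims the first entry that is actually used, clamps the mapped size to "len", and stops once "len" bytes have been mapped. A standalone sketch of the same arithmetic (plain userspace C for illustration only, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Visit a window of "len" bytes starting "off" bytes into a segment list. */
static void walk_window(const size_t *seg_len, size_t nsegs,
                        size_t off, size_t len)
{
        for (size_t i = 0; i < nsegs; i++) {
                size_t size = seg_len[i];
                size_t start;

                if (!len)
                        break;

                if (size <= off) {      /* window starts past this segment */
                        off -= size;
                        continue;
                }

                start = off;            /* trim the first segment we use */
                size -= off;
                size = size < len ? size : len;
                off = 0;

                printf("segment %zu: use [%zu, %zu)\n", i, start, start + size);
                len -= size;
        }
}

int main(void)
{
        size_t segs[] = { 4096, 8192, 4096 };

        /* e.g. visit 8192 bytes starting 6144 bytes into the buffer */
        walk_window(segs, 3, 6144, 8192);
        return 0;
}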

drivers/gpu/drm/msm/msm_mmu.h

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 struct msm_mmu_funcs {
         void (*detach)(struct msm_mmu *mmu);
         int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
-                        size_t len, int prot);
+                        size_t off, size_t len, int prot);
         int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
         void (*destroy)(struct msm_mmu *mmu);
         void (*set_stall)(struct msm_mmu *mmu, bool enable);
