Skip to content

Commit 5e72b2b

Browse files
vivekkreddy authored and akpm00 committed
udmabuf: convert udmabuf driver to use folios
This is mainly a preparatory patch to use memfd_pin_folios() API for
pinning folios. Using folios instead of pages makes sense as the udmabuf
driver needs to handle both shmem and hugetlb cases. And, using the
memfd_pin_folios() API makes this easier as we no longer need to
separately handle shmem vs hugetlb cases in the udmabuf driver.

Note that, the function vmap_udmabuf() still needs a list of pages; so,
we collect all the head pages into a local array in this case.

Other changes in this patch include the addition of helpers for checking
the memfd seals and exporting dmabuf. Moving code from udmabuf_create()
into these helpers improves readability given that udmabuf_create() is a
bit long.

Link: https://lkml.kernel.org/r/20240624063952.1572359-8-vivek.kasireddy@intel.com
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Hugh Dickins <hughd@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Dongwon Kim <dongwon.kim@intel.com>
Cc: Junxiao Chang <junxiao.chang@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 0c8b91e commit 5e72b2b

File tree

1 file changed

+83
-56
lines changed

1 file changed

+83
-56
lines changed

drivers/dma-buf/udmabuf.c

Lines changed: 83 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is
2626

2727
struct udmabuf {
2828
pgoff_t pagecount;
29-
struct page **pages;
29+
struct folio **folios;
3030
struct sg_table *sg;
3131
struct miscdevice *device;
3232
pgoff_t *offsets;
@@ -42,7 +42,7 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
4242
if (pgoff >= ubuf->pagecount)
4343
return VM_FAULT_SIGBUS;
4444

45-
pfn = page_to_pfn(ubuf->pages[pgoff]);
45+
pfn = folio_pfn(ubuf->folios[pgoff]);
4646
pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
4747

4848
return vmf_insert_pfn(vma, vmf->address, pfn);
@@ -68,11 +68,21 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
6868
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
6969
{
7070
struct udmabuf *ubuf = buf->priv;
71+
struct page **pages;
7172
void *vaddr;
73+
pgoff_t pg;
7274

7375
dma_resv_assert_held(buf->resv);
7476

75-
vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
77+
pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
78+
if (!pages)
79+
return -ENOMEM;
80+
81+
for (pg = 0; pg < ubuf->pagecount; pg++)
82+
pages[pg] = &ubuf->folios[pg]->page;
83+
84+
vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
85+
kfree(pages);
7686
if (!vaddr)
7787
return -EINVAL;
7888

@@ -107,7 +117,8 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
107117
goto err_alloc;
108118

109119
for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
110-
sg_set_page(sgl, ubuf->pages[i], PAGE_SIZE, ubuf->offsets[i]);
120+
sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
121+
ubuf->offsets[i]);
111122

112123
ret = dma_map_sgtable(dev, sg, direction, 0);
113124
if (ret < 0)
@@ -152,9 +163,9 @@ static void release_udmabuf(struct dma_buf *buf)
152163
put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
153164

154165
for (pg = 0; pg < ubuf->pagecount; pg++)
155-
put_page(ubuf->pages[pg]);
166+
folio_put(ubuf->folios[pg]);
156167
kfree(ubuf->offsets);
157-
kfree(ubuf->pages);
168+
kfree(ubuf->folios);
158169
kfree(ubuf);
159170
}
160171

@@ -215,36 +226,33 @@ static int handle_hugetlb_pages(struct udmabuf *ubuf, struct file *memfd,
215226
pgoff_t mapidx = offset >> huge_page_shift(hpstate);
216227
pgoff_t subpgoff = (offset & ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
217228
pgoff_t maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
218-
struct page *hpage = NULL;
219-
struct folio *folio;
229+
struct folio *folio = NULL;
220230
pgoff_t pgidx;
221231

222232
mapidx <<= huge_page_order(hpstate);
223233
for (pgidx = 0; pgidx < pgcnt; pgidx++) {
224-
if (!hpage) {
234+
if (!folio) {
225235
folio = __filemap_get_folio(memfd->f_mapping,
226236
mapidx,
227237
FGP_ACCESSED, 0);
228238
if (IS_ERR(folio))
229239
return PTR_ERR(folio);
230-
231-
hpage = &folio->page;
232240
}
233241

234-
get_page(hpage);
235-
ubuf->pages[*pgbuf] = hpage;
242+
folio_get(folio);
243+
ubuf->folios[*pgbuf] = folio;
236244
ubuf->offsets[*pgbuf] = subpgoff << PAGE_SHIFT;
237245
(*pgbuf)++;
238246
if (++subpgoff == maxsubpgs) {
239-
put_page(hpage);
240-
hpage = NULL;
247+
folio_put(folio);
248+
folio = NULL;
241249
subpgoff = 0;
242250
mapidx += pages_per_huge_page(hpstate);
243251
}
244252
}
245253

246-
if (hpage)
247-
put_page(hpage);
254+
if (folio)
255+
folio_put(folio);
248256

249257
return 0;
250258
}
@@ -254,31 +262,69 @@ static int handle_shmem_pages(struct udmabuf *ubuf, struct file *memfd,
254262
pgoff_t *pgbuf)
255263
{
256264
pgoff_t pgidx, pgoff = offset >> PAGE_SHIFT;
257-
struct page *page;
265+
struct folio *folio = NULL;
258266

259267
for (pgidx = 0; pgidx < pgcnt; pgidx++) {
260-
page = shmem_read_mapping_page(memfd->f_mapping,
261-
pgoff + pgidx);
262-
if (IS_ERR(page))
263-
return PTR_ERR(page);
268+
folio = shmem_read_folio(memfd->f_mapping, pgoff + pgidx);
269+
if (IS_ERR(folio))
270+
return PTR_ERR(folio);
264271

265-
ubuf->pages[*pgbuf] = page;
272+
ubuf->folios[*pgbuf] = folio;
266273
(*pgbuf)++;
267274
}
268275

269276
return 0;
270277
}
271278

279+
static int check_memfd_seals(struct file *memfd)
280+
{
281+
int seals;
282+
283+
if (!memfd)
284+
return -EBADFD;
285+
286+
if (!shmem_file(memfd) && !is_file_hugepages(memfd))
287+
return -EBADFD;
288+
289+
seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
290+
if (seals == -EINVAL)
291+
return -EBADFD;
292+
293+
if ((seals & SEALS_WANTED) != SEALS_WANTED ||
294+
(seals & SEALS_DENIED) != 0)
295+
return -EINVAL;
296+
297+
return 0;
298+
}
299+
300+
static int export_udmabuf(struct udmabuf *ubuf,
301+
struct miscdevice *device,
302+
u32 flags)
303+
{
304+
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
305+
struct dma_buf *buf;
306+
307+
ubuf->device = device;
308+
exp_info.ops = &udmabuf_ops;
309+
exp_info.size = ubuf->pagecount << PAGE_SHIFT;
310+
exp_info.priv = ubuf;
311+
exp_info.flags = O_RDWR;
312+
313+
buf = dma_buf_export(&exp_info);
314+
if (IS_ERR(buf))
315+
return PTR_ERR(buf);
316+
317+
return dma_buf_fd(buf, flags);
318+
}
319+
272320
static long udmabuf_create(struct miscdevice *device,
273321
struct udmabuf_create_list *head,
274322
struct udmabuf_create_item *list)
275323
{
276-
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
324+
pgoff_t pgcnt, pgbuf = 0, pglimit;
277325
struct file *memfd = NULL;
278326
struct udmabuf *ubuf;
279-
struct dma_buf *buf;
280-
pgoff_t pgcnt, pgbuf = 0, pglimit;
281-
int seals, ret = -EINVAL;
327+
int ret = -EINVAL;
282328
u32 i, flags;
283329

284330
ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
@@ -299,9 +345,9 @@ static long udmabuf_create(struct miscdevice *device,
299345
if (!ubuf->pagecount)
300346
goto err;
301347

302-
ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
348+
ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
303349
GFP_KERNEL);
304-
if (!ubuf->pages) {
350+
if (!ubuf->folios) {
305351
ret = -ENOMEM;
306352
goto err;
307353
}
@@ -314,18 +360,9 @@ static long udmabuf_create(struct miscdevice *device,
314360

315361
pgbuf = 0;
316362
for (i = 0; i < head->count; i++) {
317-
ret = -EBADFD;
318363
memfd = fget(list[i].memfd);
319-
if (!memfd)
320-
goto err;
321-
if (!shmem_file(memfd) && !is_file_hugepages(memfd))
322-
goto err;
323-
seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
324-
if (seals == -EINVAL)
325-
goto err;
326-
ret = -EINVAL;
327-
if ((seals & SEALS_WANTED) != SEALS_WANTED ||
328-
(seals & SEALS_DENIED) != 0)
364+
ret = check_memfd_seals(memfd);
365+
if (ret < 0)
329366
goto err;
330367

331368
pgcnt = list[i].size >> PAGE_SHIFT;
@@ -344,30 +381,20 @@ static long udmabuf_create(struct miscdevice *device,
344381
memfd = NULL;
345382
}
346383

347-
exp_info.ops = &udmabuf_ops;
348-
exp_info.size = ubuf->pagecount << PAGE_SHIFT;
349-
exp_info.priv = ubuf;
350-
exp_info.flags = O_RDWR;
351-
352-
ubuf->device = device;
353-
buf = dma_buf_export(&exp_info);
354-
if (IS_ERR(buf)) {
355-
ret = PTR_ERR(buf);
384+
flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
385+
ret = export_udmabuf(ubuf, device, flags);
386+
if (ret < 0)
356387
goto err;
357-
}
358388

359-
flags = 0;
360-
if (head->flags & UDMABUF_FLAGS_CLOEXEC)
361-
flags |= O_CLOEXEC;
362-
return dma_buf_fd(buf, flags);
389+
return ret;
363390

364391
err:
365392
while (pgbuf > 0)
366-
put_page(ubuf->pages[--pgbuf]);
393+
folio_put(ubuf->folios[--pgbuf]);
367394
if (memfd)
368395
fput(memfd);
369396
kfree(ubuf->offsets);
370-
kfree(ubuf->pages);
397+
kfree(ubuf->folios);
371398
kfree(ubuf);
372399
return ret;
373400
}

0 commit comments

Comments
 (0)