
Commit 72d092f

Umang Jain authored and gregkh committed
staging: vchiq_core: Move bulk data functions in vchiq_core
Bulk transfer core logic lives in vchiq_core.c, so move all the preparatory bulk data allocation helpers there from vchiq_arm. The discrepancy was noticed because vchiq_prepare_bulk_data() and vchiq_complete_bulk() are used in vchiq_core.c but were defined in vchiq_arm. Now that they are confined to vchiq_core.c, they can be made static and their signatures dropped from the vchiq_core header.

vchiq_prepare_bulk_data() and vchiq_complete_bulk() depend on struct vchiq_pagelist_info, cleanup_pagelistinfo(), free_pagelist() and create_pagelist(), so those are pulled in from vchiq_arm as well as part of this commit.

No functional changes intended in this patch.

Signed-off-by: Umang Jain <umang.jain@ideasonboard.com>
Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
Link: https://lore.kernel.org/r/20240919142130.1331495-3-umang.jain@ideasonboard.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent ce64433 commit 72d092f
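For orientation, here is a minimal sketch of the shape this change implies on the vchiq_core.c side. It is not part of this commit's diff: the signatures are taken from the code removed from vchiq_arm.c below and the static qualifier from the commit message, while the placement and surrounding context in vchiq_core.c are assumptions.

/* Sketch only: what the commit message describes landing in vchiq_core.c. */

/*
 * Moved from vchiq_arm.c together with struct vchiq_pagelist_info,
 * cleanup_pagelistinfo(), create_pagelist() and free_pagelist(), and made
 * file-local now that nothing outside vchiq_core.c calls them.
 */
static int
vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk,
                        void *offset, void __user *uoffset, int size, int dir);

static void
vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk);

/* The corresponding extern prototypes are dropped from the vchiq_core header. */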

File tree

3 files changed: +338, -342 lines changed


drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c

Lines changed: 0 additions & 337 deletions
@@ -14,7 +14,6 @@
 #include <linux/device.h>
 #include <linux/device/bus.h>
 #include <linux/mm.h>
-#include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/bug.h>
 #include <linux/completion.h>
@@ -36,7 +35,6 @@
 #include "vchiq_arm.h"
 #include "vchiq_bus.h"
 #include "vchiq_debugfs.h"
-#include "vchiq_pagelist.h"
 
 #define DEVICE_NAME "vchiq"
 
@@ -108,17 +106,6 @@ struct vchiq_arm_state {
         int first_connect;
 };
 
-struct vchiq_pagelist_info {
-        struct pagelist *pagelist;
-        size_t pagelist_buffer_size;
-        dma_addr_t dma_addr;
-        enum dma_data_direction dma_dir;
-        unsigned int num_pages;
-        unsigned int pages_need_release;
-        struct page **pages;
-        struct scatterlist *scatterlist;
-        unsigned int scatterlist_mapped;
-};
 
 static int
 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
@@ -145,35 +132,6 @@ vchiq_doorbell_irq(int irq, void *dev_id)
         return ret;
 }
 
-static void
-cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
-{
-        if (pagelistinfo->scatterlist_mapped) {
-                dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
-                             pagelistinfo->num_pages, pagelistinfo->dma_dir);
-        }
-
-        if (pagelistinfo->pages_need_release)
-                unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
-
-        dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
-                          pagelistinfo->pagelist, pagelistinfo->dma_addr);
-}
-
-static inline bool
-is_adjacent_block(u32 *addrs, dma_addr_t addr, unsigned int k)
-{
-        u32 tmp;
-
-        if (!k)
-                return false;
-
-        tmp = (addrs[k - 1] & PAGE_MASK) +
-              (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);
-
-        return tmp == (addr & PAGE_MASK);
-}
-
 /*
  * This function is called by the vchiq stack once it has been connected to
  * the videocore and clients can start to use the stack.
@@ -224,270 +182,6 @@ void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(
 }
 EXPORT_SYMBOL(vchiq_add_connected_callback);
 
-/* There is a potential problem with partial cache lines (pages?)
- * at the ends of the block when reading. If the CPU accessed anything in
- * the same line (page?) then it may have pulled old data into the cache,
- * obscuring the new data underneath. We can solve this by transferring the
- * partial cache lines separately, and allowing the ARM to copy into the
- * cached area.
- */
-
-static struct vchiq_pagelist_info *
-create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
-                size_t count, unsigned short type)
-{
-        struct vchiq_drv_mgmt *drv_mgmt;
-        struct pagelist *pagelist;
-        struct vchiq_pagelist_info *pagelistinfo;
-        struct page **pages;
-        u32 *addrs;
-        unsigned int num_pages, offset, i, k;
-        int actual_pages;
-        size_t pagelist_size;
-        struct scatterlist *scatterlist, *sg;
-        int dma_buffers;
-        dma_addr_t dma_addr;
-
-        if (count >= INT_MAX - PAGE_SIZE)
-                return NULL;
-
-        drv_mgmt = dev_get_drvdata(instance->state->dev);
-
-        if (buf)
-                offset = (uintptr_t)buf & (PAGE_SIZE - 1);
-        else
-                offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
-        num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
-
-        if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
-                                 sizeof(struct vchiq_pagelist_info)) /
-                                (sizeof(u32) + sizeof(pages[0]) +
-                                 sizeof(struct scatterlist)))
-                return NULL;
-
-        pagelist_size = sizeof(struct pagelist) +
-                        (num_pages * sizeof(u32)) +
-                        (num_pages * sizeof(pages[0]) +
-                        (num_pages * sizeof(struct scatterlist))) +
-                        sizeof(struct vchiq_pagelist_info);
-
-        /* Allocate enough storage to hold the page pointers and the page
-         * list
-         */
-        pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
-                                      GFP_KERNEL);
-
-        dev_dbg(instance->state->dev, "arm: %pK\n", pagelist);
-
-        if (!pagelist)
-                return NULL;
-
-        addrs = pagelist->addrs;
-        pages = (struct page **)(addrs + num_pages);
-        scatterlist = (struct scatterlist *)(pages + num_pages);
-        pagelistinfo = (struct vchiq_pagelist_info *)
-                       (scatterlist + num_pages);
-
-        pagelist->length = count;
-        pagelist->type = type;
-        pagelist->offset = offset;
-
-        /* Populate the fields of the pagelistinfo structure */
-        pagelistinfo->pagelist = pagelist;
-        pagelistinfo->pagelist_buffer_size = pagelist_size;
-        pagelistinfo->dma_addr = dma_addr;
-        pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
-                                 DMA_TO_DEVICE : DMA_FROM_DEVICE;
-        pagelistinfo->num_pages = num_pages;
-        pagelistinfo->pages_need_release = 0;
-        pagelistinfo->pages = pages;
-        pagelistinfo->scatterlist = scatterlist;
-        pagelistinfo->scatterlist_mapped = 0;
-
-        if (buf) {
-                unsigned long length = count;
-                unsigned int off = offset;
-
-                for (actual_pages = 0; actual_pages < num_pages;
-                     actual_pages++) {
-                        struct page *pg =
-                                vmalloc_to_page((buf +
-                                                 (actual_pages * PAGE_SIZE)));
-                        size_t bytes = PAGE_SIZE - off;
-
-                        if (!pg) {
-                                cleanup_pagelistinfo(instance, pagelistinfo);
-                                return NULL;
-                        }
-
-                        if (bytes > length)
-                                bytes = length;
-                        pages[actual_pages] = pg;
-                        length -= bytes;
-                        off = 0;
-                }
-                /* do not try and release vmalloc pages */
-        } else {
-                actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
-                                                   type == PAGELIST_READ, pages);
-
-                if (actual_pages != num_pages) {
-                        dev_dbg(instance->state->dev, "arm: Only %d/%d pages locked\n",
-                                actual_pages, num_pages);
-
-                        /* This is probably due to the process being killed */
-                        if (actual_pages > 0)
-                                unpin_user_pages(pages, actual_pages);
-                        cleanup_pagelistinfo(instance, pagelistinfo);
-                        return NULL;
-                }
-                /* release user pages */
-                pagelistinfo->pages_need_release = 1;
-        }
-
-        /*
-         * Initialize the scatterlist so that the magic cookie
-         * is filled if debugging is enabled
-         */
-        sg_init_table(scatterlist, num_pages);
-        /* Now set the pages for each scatterlist */
-        for (i = 0; i < num_pages; i++) {
-                unsigned int len = PAGE_SIZE - offset;
-
-                if (len > count)
-                        len = count;
-                sg_set_page(scatterlist + i, pages[i], len, offset);
-                offset = 0;
-                count -= len;
-        }
-
-        dma_buffers = dma_map_sg(instance->state->dev,
-                                 scatterlist,
-                                 num_pages,
-                                 pagelistinfo->dma_dir);
-
-        if (dma_buffers == 0) {
-                cleanup_pagelistinfo(instance, pagelistinfo);
-                return NULL;
-        }
-
-        pagelistinfo->scatterlist_mapped = 1;
-
-        /* Combine adjacent blocks for performance */
-        k = 0;
-        for_each_sg(scatterlist, sg, dma_buffers, i) {
-                unsigned int len = sg_dma_len(sg);
-                dma_addr_t addr = sg_dma_address(sg);
-
-                /* Note: addrs is the address + page_count - 1
-                 * The firmware expects blocks after the first to be page-
-                 * aligned and a multiple of the page size
-                 */
-                WARN_ON(len == 0);
-                WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
-                WARN_ON(i && (addr & ~PAGE_MASK));
-                if (is_adjacent_block(addrs, addr, k))
-                        addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
-                else
-                        addrs[k++] = (addr & PAGE_MASK) |
-                                     (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
-        }
-
-        /* Partial cache lines (fragments) require special measures */
-        if ((type == PAGELIST_READ) &&
-            ((pagelist->offset & (drv_mgmt->info->cache_line_size - 1)) ||
-             ((pagelist->offset + pagelist->length) &
-              (drv_mgmt->info->cache_line_size - 1)))) {
-                char *fragments;
-
-                if (down_interruptible(&drv_mgmt->free_fragments_sema)) {
-                        cleanup_pagelistinfo(instance, pagelistinfo);
-                        return NULL;
-                }
-
-                WARN_ON(!drv_mgmt->free_fragments);
-
-                down(&drv_mgmt->free_fragments_mutex);
-                fragments = drv_mgmt->free_fragments;
-                WARN_ON(!fragments);
-                drv_mgmt->free_fragments = *(char **)drv_mgmt->free_fragments;
-                up(&drv_mgmt->free_fragments_mutex);
-                pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
-                                 (fragments - drv_mgmt->fragments_base) / drv_mgmt->fragments_size;
-        }
-
-        return pagelistinfo;
-}
-
-static void
-free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
-              int actual)
-{
-        struct vchiq_drv_mgmt *drv_mgmt;
-        struct pagelist *pagelist = pagelistinfo->pagelist;
-        struct page **pages = pagelistinfo->pages;
-        unsigned int num_pages = pagelistinfo->num_pages;
-
-        dev_dbg(instance->state->dev, "arm: %pK, %d\n", pagelistinfo->pagelist, actual);
-
-        drv_mgmt = dev_get_drvdata(instance->state->dev);
-
-        /*
-         * NOTE: dma_unmap_sg must be called before the
-         * cpu can touch any of the data/pages.
-         */
-        dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
-                     pagelistinfo->num_pages, pagelistinfo->dma_dir);
-        pagelistinfo->scatterlist_mapped = 0;
-
-        /* Deal with any partial cache lines (fragments) */
-        if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && drv_mgmt->fragments_base) {
-                char *fragments = drv_mgmt->fragments_base +
-                        (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
-                        drv_mgmt->fragments_size;
-                int head_bytes, tail_bytes;
-
-                head_bytes = (drv_mgmt->info->cache_line_size - pagelist->offset) &
-                             (drv_mgmt->info->cache_line_size - 1);
-                tail_bytes = (pagelist->offset + actual) &
-                             (drv_mgmt->info->cache_line_size - 1);
-
-                if ((actual >= 0) && (head_bytes != 0)) {
-                        if (head_bytes > actual)
-                                head_bytes = actual;
-
-                        memcpy_to_page(pages[0],
-                                       pagelist->offset,
-                                       fragments,
-                                       head_bytes);
-                }
-                if ((actual >= 0) && (head_bytes < actual) &&
-                    (tail_bytes != 0))
-                        memcpy_to_page(pages[num_pages - 1],
-                                       (pagelist->offset + actual) &
-                                       (PAGE_SIZE - 1) & ~(drv_mgmt->info->cache_line_size - 1),
-                                       fragments + drv_mgmt->info->cache_line_size,
-                                       tail_bytes);
-
-                down(&drv_mgmt->free_fragments_mutex);
-                *(char **)fragments = drv_mgmt->free_fragments;
-                drv_mgmt->free_fragments = fragments;
-                up(&drv_mgmt->free_fragments_mutex);
-                up(&drv_mgmt->free_fragments_sema);
-        }
-
-        /* Need to mark all the pages dirty. */
-        if (pagelist->type != PAGELIST_WRITE &&
-            pagelistinfo->pages_need_release) {
-                unsigned int i;
-
-                for (i = 0; i < num_pages; i++)
-                        set_page_dirty(pages[i]);
-        }
-
-        cleanup_pagelistinfo(instance, pagelistinfo);
-}
-
 static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
 {
         struct device *dev = &pdev->dev;
@@ -616,38 +310,7 @@ static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *
 }
 
 
-int
-vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
-                        void __user *uoffset, int size, int dir)
-{
-        struct vchiq_pagelist_info *pagelistinfo;
-
-        pagelistinfo = create_pagelist(instance, offset, uoffset, size,
-                                       (dir == VCHIQ_BULK_RECEIVE)
-                                       ? PAGELIST_READ
-                                       : PAGELIST_WRITE);
-
-        if (!pagelistinfo)
-                return -ENOMEM;
-
-        bulk->data = pagelistinfo->dma_addr;
-
-        /*
-         * Store the pagelistinfo address in remote_data,
-         * which isn't used by the slave.
-         */
-        bulk->remote_data = pagelistinfo;
 
-        return 0;
-}
-
-void
-vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
-{
-        if (bulk && bulk->remote_data && bulk->actual)
-                free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
-                              bulk->actual);
-}
 
 void vchiq_dump_platform_state(struct seq_file *f)
 {
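To illustrate why these helpers belong next to the bulk-transfer core logic, here is a hedged sketch of how a caller inside vchiq_core.c can be expected to use them once they are static there. The helper behaviour (bulk->data carrying the pagelist DMA address, bulk->remote_data holding the pagelistinfo, bulk->actual gating the free) comes from the removed vchiq_arm.c code above; the caller names and surrounding flow are illustrative assumptions, not code from this commit.

/* Hypothetical caller inside vchiq_core.c; names are illustrative only. */
static int example_queue_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk,
                              void *offset, void __user *uoffset, int size, int dir)
{
        int ret;

        /* Pin the buffer and build the pagelist the VPU will DMA from/to. */
        ret = vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir);
        if (ret)
                return ret;     /* -ENOMEM when no pagelist could be built */

        /*
         * At this point bulk->data holds the DMA address of the pagelist and
         * bulk->remote_data the vchiq_pagelist_info needed for teardown;
         * the bulk would now be handed off to the firmware.
         */
        return 0;
}

static void example_bulk_done(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
{
        /* Unmaps the scatterlist, copies back any fragments and unpins the pages. */
        vchiq_complete_bulk(instance, bulk);
}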
