Skip to content

Commit 25c2b77

Browse files
tdz authored and gregkh committed
fbdev: defio: Disconnect deferred I/O from the lifetime of struct fb_info
[ Upstream commit 9ded47a ] Hold state of deferred I/O in struct fb_deferred_io_state. Allocate an instance as part of initializing deferred I/O and remove it only after the final mapping has been closed. If the fb_info and the contained deferred I/O meanwhile goes away, clear struct fb_deferred_io_state.info to invalidate the mapping. Any access will then result in a SIGBUS signal. Fixes a long-standing problem, where a device hot-unplug happens while user space still has an active mapping of the graphics memory. The hot- unplug frees the instance of struct fb_info. Accessing the memory will operate on undefined state. Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de> Fixes: 60b59be ("fbdev: mm: Deferred IO support") Cc: Helge Deller <deller@gmx.de> Cc: linux-fbdev@vger.kernel.org Cc: dri-devel@lists.freedesktop.org Cc: stable@vger.kernel.org # v2.6.22+ Signed-off-by: Helge Deller <deller@gmx.de> [ replaced kzalloc_obj(*fbdefio_state) with kzalloc(sizeof(*fbdefio_state), GFP_KERNEL) ] Signed-off-by: Sasha Levin <sashal@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 1de2db1 commit 25c2b77

2 files changed

Lines changed: 145 additions & 37 deletions

File tree

drivers/video/fbdev/core/fb_defio.c

Lines changed: 142 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,75 @@
2424
#include <linux/rmap.h>
2525
#include <linux/pagemap.h>
2626

27+
/*
28+
* struct fb_deferred_io_state
29+
*/
30+
31+
struct fb_deferred_io_state {
32+
struct kref ref;
33+
34+
struct mutex lock; /* mutex that protects the pageref list */
35+
/* fields protected by lock */
36+
struct fb_info *info;
37+
};
38+
39+
static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void)
40+
{
41+
struct fb_deferred_io_state *fbdefio_state;
42+
43+
fbdefio_state = kzalloc(sizeof(*fbdefio_state), GFP_KERNEL);
44+
if (!fbdefio_state)
45+
return NULL;
46+
47+
kref_init(&fbdefio_state->ref);
48+
mutex_init(&fbdefio_state->lock);
49+
50+
return fbdefio_state;
51+
}
52+
53+
static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
54+
{
55+
mutex_destroy(&fbdefio_state->lock);
56+
57+
kfree(fbdefio_state);
58+
}
59+
60+
static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state)
61+
{
62+
kref_get(&fbdefio_state->ref);
63+
}
64+
65+
static void __fb_deferred_io_state_release(struct kref *ref)
66+
{
67+
struct fb_deferred_io_state *fbdefio_state =
68+
container_of(ref, struct fb_deferred_io_state, ref);
69+
70+
fb_deferred_io_state_release(fbdefio_state);
71+
}
72+
73+
static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state)
74+
{
75+
kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release);
76+
}
77+
78+
/*
79+
* struct vm_operations_struct
80+
*/
81+
82+
static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
83+
{
84+
struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
85+
86+
fb_deferred_io_state_get(fbdefio_state);
87+
}
88+
89+
static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
90+
{
91+
struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
92+
93+
fb_deferred_io_state_put(fbdefio_state);
94+
}
95+
2796
static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
2897
{
2998
struct fb_deferred_io *fbdefio = info->fbdefio;
@@ -121,25 +190,46 @@ static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
121190
/* this is to find and return the vmalloc-ed fb pages */
122191
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
123192
{
193+
struct fb_info *info;
124194
unsigned long offset;
125195
struct page *page;
126-
struct fb_info *info = vmf->vma->vm_private_data;
196+
vm_fault_t ret;
197+
struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
198+
199+
mutex_lock(&fbdefio_state->lock);
200+
201+
info = fbdefio_state->info;
202+
if (!info) {
203+
ret = VM_FAULT_SIGBUS; /* our device is gone */
204+
goto err_mutex_unlock;
205+
}
127206

128207
offset = vmf->pgoff << PAGE_SHIFT;
129-
if (offset >= info->fix.smem_len)
130-
return VM_FAULT_SIGBUS;
208+
if (offset >= info->fix.smem_len) {
209+
ret = VM_FAULT_SIGBUS;
210+
goto err_mutex_unlock;
211+
}
131212

132213
page = fb_deferred_io_get_page(info, offset);
133-
if (!page)
134-
return VM_FAULT_SIGBUS;
214+
if (!page) {
215+
ret = VM_FAULT_SIGBUS;
216+
goto err_mutex_unlock;
217+
}
135218

136219
if (!vmf->vma->vm_file)
137220
fb_err(info, "no mapping available\n");
138221

139222
BUG_ON(!info->fbdefio->mapping);
140223

224+
mutex_unlock(&fbdefio_state->lock);
225+
141226
vmf->page = page;
227+
142228
return 0;
229+
230+
err_mutex_unlock:
231+
mutex_unlock(&fbdefio_state->lock);
232+
return ret;
143233
}
144234

145235
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
@@ -166,15 +256,24 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
166256
* Adds a page to the dirty list. Call this from struct
167257
* vm_operations_struct.page_mkwrite.
168258
*/
169-
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
170-
struct page *page)
259+
static vm_fault_t fb_deferred_io_track_page(struct fb_deferred_io_state *fbdefio_state,
260+
unsigned long offset, struct page *page)
171261
{
172-
struct fb_deferred_io *fbdefio = info->fbdefio;
262+
struct fb_info *info;
263+
struct fb_deferred_io *fbdefio;
173264
struct fb_deferred_io_pageref *pageref;
174265
vm_fault_t ret;
175266

176267
/* protect against the workqueue changing the page list */
177-
mutex_lock(&fbdefio->lock);
268+
mutex_lock(&fbdefio_state->lock);
269+
270+
info = fbdefio_state->info;
271+
if (!info) {
272+
ret = VM_FAULT_SIGBUS; /* our device is gone */
273+
goto err_mutex_unlock;
274+
}
275+
276+
fbdefio = info->fbdefio;
178277

179278
pageref = fb_deferred_io_pageref_get(info, offset, page);
180279
if (WARN_ON_ONCE(!pageref)) {
@@ -192,50 +291,38 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
192291
*/
193292
lock_page(pageref->page);
194293

195-
mutex_unlock(&fbdefio->lock);
294+
mutex_unlock(&fbdefio_state->lock);
196295

197296
/* come back after delay to process the deferred IO */
198297
schedule_delayed_work(&info->deferred_work, fbdefio->delay);
199298
return VM_FAULT_LOCKED;
200299

201300
err_mutex_unlock:
202-
mutex_unlock(&fbdefio->lock);
301+
mutex_unlock(&fbdefio_state->lock);
203302
return ret;
204303
}
205304

206-
/*
207-
* fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
208-
* @fb_info: The fbdev info structure
209-
* @vmf: The VM fault
210-
*
211-
* This is a callback we get when userspace first tries to
212-
* write to the page. We schedule a workqueue. That workqueue
213-
* will eventually mkclean the touched pages and execute the
214-
* deferred framebuffer IO. Then if userspace touches a page
215-
* again, we repeat the same scheme.
216-
*
217-
* Returns:
218-
* VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
219-
*/
220-
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
305+
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_deferred_io_state *fbdefio_state,
306+
struct vm_fault *vmf)
221307
{
222308
unsigned long offset = vmf->pgoff << PAGE_SHIFT;
223309
struct page *page = vmf->page;
224310

225311
file_update_time(vmf->vma->vm_file);
226312

227-
return fb_deferred_io_track_page(info, offset, page);
313+
return fb_deferred_io_track_page(fbdefio_state, offset, page);
228314
}
229315

230-
/* vm_ops->page_mkwrite handler */
231316
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
232317
{
233-
struct fb_info *info = vmf->vma->vm_private_data;
318+
struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
234319

235-
return fb_deferred_io_page_mkwrite(info, vmf);
320+
return fb_deferred_io_page_mkwrite(fbdefio_state, vmf);
236321
}
237322

238323
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
324+
.open = fb_deferred_io_vm_open,
325+
.close = fb_deferred_io_vm_close,
239326
.fault = fb_deferred_io_fault,
240327
.page_mkwrite = fb_deferred_io_mkwrite,
241328
};
@@ -252,7 +339,10 @@ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
252339
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
253340
if (!(info->flags & FBINFO_VIRTFB))
254341
vm_flags_set(vma, VM_IO);
255-
vma->vm_private_data = info;
342+
vma->vm_private_data = info->fbdefio_state;
343+
344+
fb_deferred_io_state_get(info->fbdefio_state); /* released in vma->vm_ops->close() */
345+
256346
return 0;
257347
}
258348
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
@@ -263,9 +353,10 @@ static void fb_deferred_io_work(struct work_struct *work)
263353
struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
264354
struct fb_deferred_io_pageref *pageref, *next;
265355
struct fb_deferred_io *fbdefio = info->fbdefio;
356+
struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
266357

267358
/* here we wrprotect the page's mappings, then do all deferred IO. */
268-
mutex_lock(&fbdefio->lock);
359+
mutex_lock(&fbdefio_state->lock);
269360
#ifdef CONFIG_MMU
270361
list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
271362
struct page *page = pageref->page;
@@ -283,12 +374,13 @@ static void fb_deferred_io_work(struct work_struct *work)
283374
list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
284375
fb_deferred_io_pageref_put(pageref, info);
285376

286-
mutex_unlock(&fbdefio->lock);
377+
mutex_unlock(&fbdefio_state->lock);
287378
}
288379

289380
int fb_deferred_io_init(struct fb_info *info)
290381
{
291382
struct fb_deferred_io *fbdefio = info->fbdefio;
383+
struct fb_deferred_io_state *fbdefio_state;
292384
struct fb_deferred_io_pageref *pagerefs;
293385
unsigned long npagerefs;
294386
int ret;
@@ -298,7 +390,11 @@ int fb_deferred_io_init(struct fb_info *info)
298390
if (WARN_ON(!info->fix.smem_len))
299391
return -EINVAL;
300392

301-
mutex_init(&fbdefio->lock);
393+
fbdefio_state = fb_deferred_io_state_alloc();
394+
if (!fbdefio_state)
395+
return -ENOMEM;
396+
fbdefio_state->info = info;
397+
302398
INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
303399
INIT_LIST_HEAD(&fbdefio->pagereflist);
304400
if (fbdefio->delay == 0) /* set a default of 1 s */
@@ -315,10 +411,12 @@ int fb_deferred_io_init(struct fb_info *info)
315411
info->npagerefs = npagerefs;
316412
info->pagerefs = pagerefs;
317413

414+
info->fbdefio_state = fbdefio_state;
415+
318416
return 0;
319417

320418
err:
321-
mutex_destroy(&fbdefio->lock);
419+
fb_deferred_io_state_release(fbdefio_state);
322420
return ret;
323421
}
324422
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
@@ -352,11 +450,19 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_release);
352450
void fb_deferred_io_cleanup(struct fb_info *info)
353451
{
354452
struct fb_deferred_io *fbdefio = info->fbdefio;
453+
struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
355454

356455
fb_deferred_io_lastclose(info);
357456

457+
info->fbdefio_state = NULL;
458+
459+
mutex_lock(&fbdefio_state->lock);
460+
fbdefio_state->info = NULL;
461+
mutex_unlock(&fbdefio_state->lock);
462+
463+
fb_deferred_io_state_put(fbdefio_state);
464+
358465
kvfree(info->pagerefs);
359-
mutex_destroy(&fbdefio->lock);
360466
fbdefio->mapping = NULL;
361467
}
362468
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

include/linux/fb.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -217,13 +217,14 @@ struct fb_deferred_io {
217217
unsigned long delay;
218218
bool sort_pagereflist; /* sort pagelist by offset */
219219
int open_count; /* number of opened files; protected by fb_info lock */
220-
struct mutex lock; /* mutex that protects the pageref list */
221220
struct list_head pagereflist; /* list of pagerefs for touched pages */
222221
struct address_space *mapping; /* page cache object for fb device */
223222
/* callback */
224223
struct page *(*get_page)(struct fb_info *info, unsigned long offset);
225224
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
226225
};
226+
227+
struct fb_deferred_io_state;
227228
#endif
228229

229230
/*
@@ -490,6 +491,7 @@ struct fb_info {
490491
unsigned long npagerefs;
491492
struct fb_deferred_io_pageref *pagerefs;
492493
struct fb_deferred_io *fbdefio;
494+
struct fb_deferred_io_state *fbdefio_state;
493495
#endif
494496

495497
const struct fb_ops *fbops;

0 commit comments

Comments
 (0)