2424#include <linux/rmap.h>
2525#include <linux/pagemap.h>
2626
27+ /*
28+ * struct fb_deferred_io_state
29+ */
30+
31+ struct fb_deferred_io_state {
32+ struct kref ref ;
33+
34+ struct mutex lock ; /* mutex that protects the pageref list */
35+ /* fields protected by lock */
36+ struct fb_info * info ;
37+ };
38+
39+ static struct fb_deferred_io_state * fb_deferred_io_state_alloc (void )
40+ {
41+ struct fb_deferred_io_state * fbdefio_state ;
42+
43+ fbdefio_state = kzalloc (sizeof (* fbdefio_state ), GFP_KERNEL );
44+ if (!fbdefio_state )
45+ return NULL ;
46+
47+ kref_init (& fbdefio_state -> ref );
48+ mutex_init (& fbdefio_state -> lock );
49+
50+ return fbdefio_state ;
51+ }
52+
/* Tear down and free the state; called once the last reference is gone. */
static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
{
	mutex_destroy(&fbdefio_state->lock);

	kfree(fbdefio_state);
}
59+
/* Acquire an additional reference on the deferred-I/O state. */
static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state)
{
	kref_get(&fbdefio_state->ref);
}
64+
/* kref release callback: recovers the state from its embedded kref. */
static void __fb_deferred_io_state_release(struct kref *ref)
{
	struct fb_deferred_io_state *fbdefio_state =
		container_of(ref, struct fb_deferred_io_state, ref);

	fb_deferred_io_state_release(fbdefio_state);
}
72+
/* Drop a reference; frees the state when the count reaches zero. */
static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state)
{
	kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release);
}
77+
78+ /*
79+ * struct vm_operations_struct
80+ */
81+
/* vm_ops->open: a new vma (fork/split) shares the state, so take a ref. */
static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
{
	struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;

	fb_deferred_io_state_get(fbdefio_state);
}
88+
/* vm_ops->close: the vma is going away, release its state reference. */
static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
{
	struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;

	fb_deferred_io_state_put(fbdefio_state);
}
95+
2796static struct page * fb_deferred_io_get_page (struct fb_info * info , unsigned long offs )
2897{
2998 struct fb_deferred_io * fbdefio = info -> fbdefio ;
@@ -121,25 +190,46 @@ static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
121190/* this is to find and return the vmalloc-ed fb pages */
122191static vm_fault_t fb_deferred_io_fault (struct vm_fault * vmf )
123192{
193+ struct fb_info * info ;
124194 unsigned long offset ;
125195 struct page * page ;
126- struct fb_info * info = vmf -> vma -> vm_private_data ;
196+ vm_fault_t ret ;
197+ struct fb_deferred_io_state * fbdefio_state = vmf -> vma -> vm_private_data ;
198+
199+ mutex_lock (& fbdefio_state -> lock );
200+
201+ info = fbdefio_state -> info ;
202+ if (!info ) {
203+ ret = VM_FAULT_SIGBUS ; /* our device is gone */
204+ goto err_mutex_unlock ;
205+ }
127206
128207 offset = vmf -> pgoff << PAGE_SHIFT ;
129- if (offset >= info -> fix .smem_len )
130- return VM_FAULT_SIGBUS ;
208+ if (offset >= info -> fix .smem_len ) {
209+ ret = VM_FAULT_SIGBUS ;
210+ goto err_mutex_unlock ;
211+ }
131212
132213 page = fb_deferred_io_get_page (info , offset );
133- if (!page )
134- return VM_FAULT_SIGBUS ;
214+ if (!page ) {
215+ ret = VM_FAULT_SIGBUS ;
216+ goto err_mutex_unlock ;
217+ }
135218
136219 if (!vmf -> vma -> vm_file )
137220 fb_err (info , "no mapping available\n" );
138221
139222 BUG_ON (!info -> fbdefio -> mapping );
140223
224+ mutex_unlock (& fbdefio_state -> lock );
225+
141226 vmf -> page = page ;
227+
142228 return 0 ;
229+
230+ err_mutex_unlock :
231+ mutex_unlock (& fbdefio_state -> lock );
232+ return ret ;
143233}
144234
145235int fb_deferred_io_fsync (struct file * file , loff_t start , loff_t end , int datasync )
@@ -166,15 +256,24 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
166256 * Adds a page to the dirty list. Call this from struct
167257 * vm_operations_struct.page_mkwrite.
168258 */
169- static vm_fault_t fb_deferred_io_track_page (struct fb_info * info , unsigned long offset ,
170- struct page * page )
259+ static vm_fault_t fb_deferred_io_track_page (struct fb_deferred_io_state * fbdefio_state ,
260+ unsigned long offset , struct page * page )
171261{
172- struct fb_deferred_io * fbdefio = info -> fbdefio ;
262+ struct fb_info * info ;
263+ struct fb_deferred_io * fbdefio ;
173264 struct fb_deferred_io_pageref * pageref ;
174265 vm_fault_t ret ;
175266
176267 /* protect against the workqueue changing the page list */
177- mutex_lock (& fbdefio -> lock );
268+ mutex_lock (& fbdefio_state -> lock );
269+
270+ info = fbdefio_state -> info ;
271+ if (!info ) {
272+ ret = VM_FAULT_SIGBUS ; /* our device is gone */
273+ goto err_mutex_unlock ;
274+ }
275+
276+ fbdefio = info -> fbdefio ;
178277
179278 pageref = fb_deferred_io_pageref_get (info , offset , page );
180279 if (WARN_ON_ONCE (!pageref )) {
@@ -192,50 +291,38 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
192291 */
193292 lock_page (pageref -> page );
194293
195- mutex_unlock (& fbdefio -> lock );
294+ mutex_unlock (& fbdefio_state -> lock );
196295
197296 /* come back after delay to process the deferred IO */
198297 schedule_delayed_work (& info -> deferred_work , fbdefio -> delay );
199298 return VM_FAULT_LOCKED ;
200299
201300err_mutex_unlock :
202- mutex_unlock (& fbdefio -> lock );
301+ mutex_unlock (& fbdefio_state -> lock );
203302 return ret ;
204303}
205304
206- /*
207- * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
208- * @fb_info: The fbdev info structure
209- * @vmf: The VM fault
210- *
211- * This is a callback we get when userspace first tries to
212- * write to the page. We schedule a workqueue. That workqueue
213- * will eventually mkclean the touched pages and execute the
214- * deferred framebuffer IO. Then if userspace touches a page
215- * again, we repeat the same scheme.
216- *
217- * Returns:
218- * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
219- */
220- static vm_fault_t fb_deferred_io_page_mkwrite (struct fb_info * info , struct vm_fault * vmf )
305+ static vm_fault_t fb_deferred_io_page_mkwrite (struct fb_deferred_io_state * fbdefio_state ,
306+ struct vm_fault * vmf )
221307{
222308 unsigned long offset = vmf -> pgoff << PAGE_SHIFT ;
223309 struct page * page = vmf -> page ;
224310
225311 file_update_time (vmf -> vma -> vm_file );
226312
227- return fb_deferred_io_track_page (info , offset , page );
313+ return fb_deferred_io_track_page (fbdefio_state , offset , page );
228314}
229315
230- /* vm_ops->page_mkwrite handler */
231316static vm_fault_t fb_deferred_io_mkwrite (struct vm_fault * vmf )
232317{
233- struct fb_info * info = vmf -> vma -> vm_private_data ;
318+ struct fb_deferred_io_state * fbdefio_state = vmf -> vma -> vm_private_data ;
234319
235- return fb_deferred_io_page_mkwrite (info , vmf );
320+ return fb_deferred_io_page_mkwrite (fbdefio_state , vmf );
236321}
237322
/*
 * VMA callbacks for deferred-I/O mappings: open/close manage the state's
 * refcount so the mapping can outlive the device.
 */
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.open		= fb_deferred_io_vm_open,
	.close		= fb_deferred_io_vm_close,
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};
@@ -252,7 +339,10 @@ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
252339 vm_flags_set (vma , VM_DONTEXPAND | VM_DONTDUMP );
253340 if (!(info -> flags & FBINFO_VIRTFB ))
254341 vm_flags_set (vma , VM_IO );
255- vma -> vm_private_data = info ;
342+ vma -> vm_private_data = info -> fbdefio_state ;
343+
344+ fb_deferred_io_state_get (info -> fbdefio_state ); /* released in vma->vm_ops->close() */
345+
256346 return 0 ;
257347}
258348EXPORT_SYMBOL_GPL (fb_deferred_io_mmap );
@@ -263,9 +353,10 @@ static void fb_deferred_io_work(struct work_struct *work)
263353 struct fb_info * info = container_of (work , struct fb_info , deferred_work .work );
264354 struct fb_deferred_io_pageref * pageref , * next ;
265355 struct fb_deferred_io * fbdefio = info -> fbdefio ;
356+ struct fb_deferred_io_state * fbdefio_state = info -> fbdefio_state ;
266357
267358 /* here we wrprotect the page's mappings, then do all deferred IO. */
268- mutex_lock (& fbdefio -> lock );
359+ mutex_lock (& fbdefio_state -> lock );
269360#ifdef CONFIG_MMU
270361 list_for_each_entry (pageref , & fbdefio -> pagereflist , list ) {
271362 struct page * page = pageref -> page ;
@@ -283,12 +374,13 @@ static void fb_deferred_io_work(struct work_struct *work)
283374 list_for_each_entry_safe (pageref , next , & fbdefio -> pagereflist , list )
284375 fb_deferred_io_pageref_put (pageref , info );
285376
286- mutex_unlock (& fbdefio -> lock );
377+ mutex_unlock (& fbdefio_state -> lock );
287378}
288379
289380int fb_deferred_io_init (struct fb_info * info )
290381{
291382 struct fb_deferred_io * fbdefio = info -> fbdefio ;
383+ struct fb_deferred_io_state * fbdefio_state ;
292384 struct fb_deferred_io_pageref * pagerefs ;
293385 unsigned long npagerefs ;
294386 int ret ;
@@ -298,7 +390,11 @@ int fb_deferred_io_init(struct fb_info *info)
298390 if (WARN_ON (!info -> fix .smem_len ))
299391 return - EINVAL ;
300392
301- mutex_init (& fbdefio -> lock );
393+ fbdefio_state = fb_deferred_io_state_alloc ();
394+ if (!fbdefio_state )
395+ return - ENOMEM ;
396+ fbdefio_state -> info = info ;
397+
302398 INIT_DELAYED_WORK (& info -> deferred_work , fb_deferred_io_work );
303399 INIT_LIST_HEAD (& fbdefio -> pagereflist );
304400 if (fbdefio -> delay == 0 ) /* set a default of 1 s */
@@ -315,10 +411,12 @@ int fb_deferred_io_init(struct fb_info *info)
315411 info -> npagerefs = npagerefs ;
316412 info -> pagerefs = pagerefs ;
317413
414+ info -> fbdefio_state = fbdefio_state ;
415+
318416 return 0 ;
319417
320418err :
321- mutex_destroy ( & fbdefio -> lock );
419+ fb_deferred_io_state_release ( fbdefio_state );
322420 return ret ;
323421}
324422EXPORT_SYMBOL_GPL (fb_deferred_io_init );
@@ -352,11 +450,19 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_release);
352450void fb_deferred_io_cleanup (struct fb_info * info )
353451{
354452 struct fb_deferred_io * fbdefio = info -> fbdefio ;
453+ struct fb_deferred_io_state * fbdefio_state = info -> fbdefio_state ;
355454
356455 fb_deferred_io_lastclose (info );
357456
457+ info -> fbdefio_state = NULL ;
458+
459+ mutex_lock (& fbdefio_state -> lock );
460+ fbdefio_state -> info = NULL ;
461+ mutex_unlock (& fbdefio_state -> lock );
462+
463+ fb_deferred_io_state_put (fbdefio_state );
464+
358465 kvfree (info -> pagerefs );
359- mutex_destroy (& fbdefio -> lock );
360466 fbdefio -> mapping = NULL ;
361467}
362468EXPORT_SYMBOL_GPL (fb_deferred_io_cleanup );
0 commit comments