@@ -3642,10 +3642,6 @@ ssize_t generic_perform_write(struct file *file,
 		 * Otherwise there's a nasty deadlock on copying from the
 		 * same page as we're writing to, without it being marked
 		 * up-to-date.
-		 *
-		 * Not only is this an optimisation, but it is also required
-		 * to check that the address is actually valid, when atomic
-		 * usercopies are used, below.
 		 */
 		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 			status = -EFAULT;
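
The comment kept above states the ordering constraint that shapes this loop: the user pages being copied from are faulted in before the destination pagecache page is locked, because the copy itself later runs in an atomic, non-faulting context and could otherwise deadlock when source and destination are the same page. A rough, non-authoritative sketch of that ordering (the ->write_begin()/->write_end() bookkeeping is elided; the real short-copy handling is in the next hunk):

	do {
		/* Fault in the source pages while no page lock is held;
		 * a non-zero return means the user range is not readable. */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		/* ... ->write_begin() locks the destination page here ... */

		/* The copy may not fault: it transfers what it can and
		 * advances the iterator by that amount. */
		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		/* ... ->write_end() and the retry logic, as in the next hunk ... */
	} while (iov_iter_count(i));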
@@ -3665,33 +3661,31 @@ ssize_t generic_perform_write(struct file *file,
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
 
-		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 		flush_dcache_page(page);
 
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
 						page, fsdata);
-		if (unlikely(status < 0))
-			break;
-		copied = status;
-
+		if (unlikely(status != copied)) {
+			iov_iter_revert(i, copied - max(status, 0L));
+			if (unlikely(status < 0))
+				break;
+		}
 		cond_resched();
 
-		iov_iter_advance(i, copied);
-		if (unlikely(copied == 0)) {
+		if (unlikely(status == 0)) {
 			/*
-			 * If we were unable to copy any data at all, we must
-			 * fall back to a single segment length write.
-			 *
-			 * If we didn't fallback here, we could livelock
-			 * because not all segments in the iov can be copied at
-			 * once without a pagefault.
+			 * A short copy made ->write_end() reject the
+			 * thing entirely.  Might be memory poisoning
+			 * halfway through, might be a race with munmap,
+			 * might be severe memory pressure.
 			 */
-			bytes = min_t(unsigned long, PAGE_SIZE - offset,
-						iov_iter_single_seg_count(i));
+			if (copied)
+				bytes = copied;
 			goto again;
 		}
-		pos += copied;
-		written += copied;
+		pos += status;
+		written += status;
 
 		balance_dirty_pages_ratelimited(mapping);
 	} while (iov_iter_count(i));
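
Worth spelling out, since it is the heart of the change above: copy_page_from_iter_atomic() advances the iterator by however many bytes it managed to copy, so when ->write_end() accepts fewer bytes than were copied, the iterator must be rewound to match what actually reached the file. A rough sketch of that arithmetic with made-up numbers (names follow the hunk above; the values are illustrative only):

	size_t copied = 4096;	/* atomic copy moved a full page and advanced the iterator */
	long status = 1000;	/* but ->write_end() only committed 1000 bytes */

	if (status != copied) {
		/* Rewind so the iterator sits just past the committed bytes:
		 * 4096 - max(1000, 0) = 3096 here; if status were 0 or
		 * negative, the whole 4096-byte copy would be undone. */
		iov_iter_revert(i, copied - max(status, 0L));
		if (status < 0)
			break;		/* hard error from the filesystem */
	}
	/* pos and written then advance by status, not copied; and when
	 * status == 0 but copied != 0, the loop retries the same range
	 * with bytes clamped to copied. */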