@@ -520,6 +520,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_pipe_to_iter(addr, bytes, i);
 	if (user_backed_iter(i))
@@ -606,6 +608,8 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
  */
 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_mc_pipe_to_iter(addr, bytes, i);
 	if (user_backed_iter(i))
@@ -622,10 +626,9 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
 
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
+
 	if (user_backed_iter(i))
 		might_fault();
 	iterate_and_advance(i, bytes, base, len, off,
@@ -639,10 +642,9 @@ EXPORT_SYMBOL(_copy_from_iter);
 
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
+
 	iterate_and_advance(i, bytes, base, len, off,
 		__copy_from_user_inatomic_nocache(addr + off, base, len),
 		memcpy(addr + off, base, len)
@@ -671,10 +673,9 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
  */
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
+
 	iterate_and_advance(i, bytes, base, len, off,
 		__copy_from_user_flushcache(addr + off, base, len),
 		memcpy_flushcache(addr + off, base, len)
@@ -714,6 +715,8 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 	size_t res = 0;
 	if (!page_copy_sane(page, offset, bytes))
 		return 0;
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_page_to_iter_pipe(page, offset, bytes, i);
 	page += offset / PAGE_SIZE; // first subpage
@@ -811,9 +814,8 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt
 		kunmap_atomic(kaddr);
 		return 0;
 	}
-	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
+	if (WARN_ON_ONCE(!i->data_source)) {
 		kunmap_atomic(kaddr);
-		WARN_ON(1);
 		return 0;
 	}
 	iterate_and_advance(i, bytes, base, len, off,
@@ -1525,10 +1527,9 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 {
 	__wsum sum, next;
 	sum = *csum;
-	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
+
 	iterate_and_advance(i, bytes, base, len, off, ({
 		next = csum_and_copy_from_user(base, addr + off, len);
 		sum = csum_block_add(sum, next, off);
@@ -1548,6 +1549,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
 	struct csum_state *csstate = _csstate;
 	__wsum sum, next;
 
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_discard(i))) {
 		// can't use csum_memcpy() for that one - data is not copied
 		csstate->csum = csum_block_add(csstate->csum,
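The pattern applied throughout this diff is a single up-front direction assertion: copy-to helpers warn and return 0 when the iterator is a data source (WARN_ON_ONCE(i->data_source)), while copy-from helpers warn when it is not (WARN_ON_ONCE(!i->data_source)), replacing the older per-case iov_iter_is_pipe()/iov_iter_is_discard() guards paired with WARN_ON(1). Below is a minimal userspace sketch of that shape, assuming a simplified iterator with a boolean data_source flag; toy_iter, toy_copy_to_iter and the WARN_ON_ONCE() stand-in are hypothetical illustrations (built on the GCC/clang statement-expression extension), not the kernel's implementations.

/*
 * Sketch only: models the "assert direction once, then copy" shape.
 * ->data_source is true when data flows out of the iterator (a source),
 * false when the iterator is a destination.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* crude stand-in for the kernel's WARN_ON_ONCE(): warn once, yield cond */
#define WARN_ON_ONCE(cond) ({					\
	static bool __warned;					\
	bool __c = (cond);					\
	if (__c && !__warned) {					\
		__warned = true;				\
		fprintf(stderr, "WARN: %s\n", #cond);		\
	}							\
	__c;							\
})

struct toy_iter {
	bool data_source;	/* true: source iterator, false: destination */
	char *buf;
	size_t len;
};

/* copy *to* the iterator: it must be a destination, i.e. !data_source */
static size_t toy_copy_to_iter(const void *addr, size_t bytes, struct toy_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;		/* wrong direction: copy nothing */
	if (bytes > i->len)
		bytes = i->len;
	memcpy(i->buf, addr, bytes);
	return bytes;
}

int main(void)
{
	char dst[8];
	struct toy_iter ok  = { .data_source = false, .buf = dst, .len = sizeof(dst) };
	struct toy_iter bad = { .data_source = true,  .buf = dst, .len = sizeof(dst) };

	printf("ok:  %zu bytes\n", toy_copy_to_iter("hi", 2, &ok));	/* 2 */
	printf("bad: %zu bytes\n", toy_copy_to_iter("hi", 2, &bad));	/* 0, warns once */
	return 0;
}

The point of checking the flag rather than the iterator flavour is that the same guard works for every backing type (iovec, kvec, bvec, pipe, discard), so each helper states its expected direction once instead of enumerating the flavours that would be wrong.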