@@ -49,16 +49,6 @@ static inline void skcipher_map_dst(struct skcipher_walk *walk)
         walk->dst.virt.addr = scatterwalk_map(&walk->out);
 }
 
-static inline void skcipher_unmap_src(struct skcipher_walk *walk)
-{
-        scatterwalk_unmap(walk->src.virt.addr);
-}
-
-static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
-{
-        scatterwalk_unmap(walk->dst.virt.addr);
-}
-
 static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
 {
         return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
@@ -70,14 +60,6 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
         return container_of(alg, struct skcipher_alg, base);
 }
 
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
-        u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1);
-
-        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
-        return 0;
-}
-
 /**
  * skcipher_walk_done() - finish one step of a skcipher_walk
  * @walk: the skcipher_walk
@@ -112,15 +94,14 @@ int skcipher_walk_done(struct skcipher_walk *walk, int res)
         if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
                                     SKCIPHER_WALK_COPY |
                                     SKCIPHER_WALK_DIFF)))) {
-unmap_src:
-                skcipher_unmap_src(walk);
+                scatterwalk_advance(&walk->in, n);
         } else if (walk->flags & SKCIPHER_WALK_DIFF) {
-                skcipher_unmap_dst(walk);
-                goto unmap_src;
+                scatterwalk_unmap(walk->src.virt.addr);
+                scatterwalk_advance(&walk->in, n);
         } else if (walk->flags & SKCIPHER_WALK_COPY) {
+                scatterwalk_advance(&walk->in, n);
                 skcipher_map_dst(walk);
                 memcpy(walk->dst.virt.addr, walk->page, n);
-                skcipher_unmap_dst(walk);
         } else { /* SKCIPHER_WALK_SLOW */
                 if (res > 0) {
                         /*
@@ -131,21 +112,23 @@ int skcipher_walk_done(struct skcipher_walk *walk, int res)
                          */
                         res = -EINVAL;
                         total = 0;
-                } else
-                        n = skcipher_done_slow(walk, n);
+                } else {
+                        u8 *buf = PTR_ALIGN(walk->buffer, walk->alignmask + 1);
+
+                        memcpy_to_scatterwalk(&walk->out, buf, n);
+                }
+                goto dst_done;
         }
 
+        scatterwalk_done_dst(&walk->out, walk->dst.virt.addr, n);
+dst_done:
+
         if (res > 0)
                 res = 0;
 
         walk->total = total;
         walk->nbytes = 0;
 
-        scatterwalk_advance(&walk->in, n);
-        scatterwalk_advance(&walk->out, n);
-        scatterwalk_done(&walk->in, 0, total);
-        scatterwalk_done(&walk->out, 1, total);
-
         if (total) {
                 if (walk->flags & SKCIPHER_WALK_SLEEP)
                         cond_resched();
@@ -192,7 +175,7 @@ static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
         walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
         walk->src.virt.addr = walk->dst.virt.addr;
 
-        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
+        memcpy_from_scatterwalk(walk->src.virt.addr, &walk->in, bsize);
 
         walk->nbytes = bsize;
         walk->flags |= SKCIPHER_WALK_SLOW;
@@ -206,7 +189,11 @@ static int skcipher_next_copy(struct skcipher_walk *walk)
 
         skcipher_map_src(walk);
         memcpy(tmp, walk->src.virt.addr, walk->nbytes);
-        skcipher_unmap_src(walk);
+        scatterwalk_unmap(walk->src.virt.addr);
+        /*
+         * walk->in is advanced later when the number of bytes actually
+         * processed (which might be less than walk->nbytes) is known.
+         */
 
         walk->src.virt.addr = tmp;
         walk->dst.virt.addr = tmp;
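
Taken together, the skcipher_walk_done() hunks split the old combined unmap/advance/done sequence into per-side steps. Below is a minimal sketch of one walk step where source and destination are mapped separately (the SKCIPHER_WALK_DIFF case), using only calls that appear in this diff; the assumption that scatterwalk_done_dst() unmaps and advances the destination in a single call is inferred from the hunks above, not quoted from the full source:

/* Sketch: one SKCIPHER_WALK_DIFF step after this patch (inferred). */
u8 *src = scatterwalk_map(&walk->in);    /* map the current source chunk */
/* ... the cipher processes n bytes from src ... */
scatterwalk_unmap(src);                  /* drop the temporary mapping */
scatterwalk_advance(&walk->in, n);       /* move the source walk forward */

/* Destination side: one call stands in for the old unmap +
 * scatterwalk_advance() + scatterwalk_done() sequence. */
scatterwalk_done_dst(&walk->out, walk->dst.virt.addr, n);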
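The slow path makes the same move for its bounce buffer: the scatterwalk_copychunks() calls with their 0/1 direction flag give way to direction-named helpers. A hedged sketch of that round trip, assuming from their use above that both helpers copy the given byte count and advance the walk internally:

/* Sketch: slow-path bounce-buffer round trip (assumed semantics). */
u8 *buf = PTR_ALIGN(walk->buffer, walk->alignmask + 1);

memcpy_from_scatterwalk(buf, &walk->in, bsize);  /* gather source bytes */
/* ... the cipher processes bsize bytes in place in buf ... */
memcpy_to_scatterwalk(&walk->out, buf, bsize);   /* scatter to destination */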