@@ -23,6 +23,8 @@ struct crypto_scomp;
 
 static const struct crypto_type crypto_acomp_type;
 
+static void acomp_reqchain_done(void *data, int err);
+
 static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
 {
 	return container_of(alg, struct acomp_alg, calg.base);
@@ -123,6 +125,201 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
 
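+/*
+ * Return true if the head request or any request chained to it
+ * carries non-DMA memory.
+ */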
+static bool acomp_request_has_nondma(struct acomp_req *req)
+{
+	struct acomp_req *r2;
+
+	if (acomp_request_isnondma(req))
+		return true;
+
+	list_for_each_entry(r2, &req->base.list, base.list)
+		if (acomp_request_isnondma(r2))
+			return true;
+
+	return false;
+}
+
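+/*
+ * For async transforms, stash the caller's completion callback and
+ * context in the chain state and substitute @cplt, remembering the
+ * head request so it can be restored later.
+ */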
+static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+	struct acomp_req_chain *state = &req->chain;
+
+	if (!acomp_is_async(tfm))
+		return;
+
+	state->compl = req->base.complete;
+	state->data = req->base.data;
+	req->base.complete = cplt;
+	req->base.data = state;
+	state->req0 = req;
+}
+
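+/* Undo acomp_save_req(): reinstate the caller's completion callback. */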
+static void acomp_restore_req(struct acomp_req_chain *state)
+{
+	struct acomp_req *req = state->req0;
+	struct crypto_acomp *tfm;
+
+	tfm = crypto_acomp_reqtfm(req);
+	if (!acomp_is_async(tfm))
+		return;
+
+	req->base.complete = state->compl;
+	req->base.data = state->data;
+}
+
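+/*
+ * Record the result of the current request and switch its buffers
+ * back from the temporary scatterlists to the virtual addresses
+ * saved by acomp_virt_to_sg().
+ */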
+static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
+{
+	struct acomp_req *req = state->cur;
+	unsigned int slen = req->slen;
+	unsigned int dlen = req->dlen;
+
+	req->base.err = err;
+	state = &req->chain;
+
+	if (state->src)
+		acomp_request_set_src_dma(req, state->src, slen);
+	if (state->dst)
+		acomp_request_set_dst_dma(req, state->dst, dlen);
+	state->src = NULL;
+	state->dst = NULL;
+}
+
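+/*
+ * If the request uses virtual addresses, wrap them in single-entry
+ * scatterlists so the transform only ever sees SG lists, keeping the
+ * original pointers in the chain state for later restoration.
+ */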
+static void acomp_virt_to_sg(struct acomp_req *req)
+{
+	struct acomp_req_chain *state = &req->chain;
+
+	if (acomp_request_src_isvirt(req)) {
+		unsigned int slen = req->slen;
+		const u8 *svirt = req->svirt;
+
+		state->src = svirt;
+		sg_init_one(&state->ssg, svirt, slen);
+		acomp_request_set_src_sg(req, &state->ssg, slen);
+	}
+
+	if (acomp_request_dst_isvirt(req)) {
+		unsigned int dlen = req->dlen;
+		u8 *dvirt = req->dvirt;
+
+		state->dst = dvirt;
+		sg_init_one(&state->dsg, dvirt, dlen);
+		acomp_request_set_dst_sg(req, &state->dsg, dlen);
+	}
+}
+
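+/*
+ * Finish the current request, then issue each remaining request in
+ * the chain, moving completed requests back onto the head request's
+ * list.  Returns early if an operation is still in flight.
+ */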
+static int acomp_reqchain_finish(struct acomp_req_chain *state,
+				 int err, u32 mask)
+{
+	struct acomp_req *req0 = state->req0;
+	struct acomp_req *req = state->cur;
+	struct acomp_req *n;
+
+	acomp_reqchain_virt(state, err);
+
+	if (req != req0)
+		list_add_tail(&req->base.list, &req0->base.list);
+
+	list_for_each_entry_safe(req, n, &state->head, base.list) {
+		list_del_init(&req->base.list);
+
+		req->base.flags &= mask;
+		req->base.complete = acomp_reqchain_done;
+		req->base.data = state;
+		state->cur = req;
+
+		acomp_virt_to_sg(req);
+		err = state->op(req);
+
+		if (err == -EINPROGRESS) {
+			if (!list_empty(&state->head))
+				err = -EBUSY;
+			goto out;
+		}
+
+		if (err == -EBUSY)
+			goto out;
+
+		acomp_reqchain_virt(state, err);
+		list_add_tail(&req->base.list, &req0->base.list);
+	}
+
+	acomp_restore_req(state);
+
+out:
+	return err;
+}
+
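+/*
+ * Completion callback for chained requests: resume the chain walk
+ * and invoke the caller's original completion once it is done.
+ */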
+static void acomp_reqchain_done(void *data, int err)
+{
+	struct acomp_req_chain *state = data;
+	crypto_completion_t compl = state->compl;
+
+	data = state->data;
+
+	if (err == -EINPROGRESS) {
+		if (!list_empty(&state->head))
+			return;
+		goto notify;
+	}
+
+	err = acomp_reqchain_finish(state, err, CRYPTO_TFM_REQ_MAY_BACKLOG);
+	if (err == -EBUSY)
+		return;
+
+notify:
+	compl(data, err);
+}
+
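+/*
+ * Apply @op to every request in the chain.  Transforms that handle
+ * chaining natively, and lone SG-based requests, are passed straight
+ * through; otherwise the chain is walked here with virtual addresses
+ * converted to scatterlists first.
+ */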
+static int acomp_do_req_chain(struct acomp_req *req,
+			      int (*op)(struct acomp_req *req))
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+	struct acomp_req_chain *state = &req->chain;
+	int err;
+
+	if (crypto_acomp_req_chain(tfm) ||
+	    (!acomp_request_chained(req) && !acomp_request_isvirt(req)))
+		return op(req);
+
+	/*
+	 * There are no in-kernel users that do this.  If and when such
+	 * users come into being, a fall-back path can be added.
+	 */
+	if (acomp_request_has_nondma(req))
+		return -EINVAL;
+
+	if (acomp_is_async(tfm)) {
+		acomp_save_req(req, acomp_reqchain_done);
+		state = req->base.data;
+	}
+
+	state->op = op;
+	state->cur = req;
+	state->src = NULL;
+	INIT_LIST_HEAD(&state->head);
+	list_splice_init(&req->base.list, &state->head);
+
+	acomp_virt_to_sg(req);
+	err = op(req);
+	if (err == -EBUSY || err == -EINPROGRESS)
+		return -EBUSY;
+
+	return acomp_reqchain_finish(state, err, ~0);
+}
+
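+/* Exported entry points: route (de)compression through the chain walker. */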
+int crypto_acomp_compress(struct acomp_req *req)
+{
+	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_compress);
+
+int crypto_acomp_decompress(struct acomp_req *req)
+{
+	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
+
 void comp_prepare_alg(struct comp_alg_common *alg)
 {
 	struct crypto_alg *base = &alg->base;