@@ -58,7 +58,6 @@ struct dm_crypt_io {
 	atomic_t io_pending;
 	int error;
 	sector_t sector;
-	struct dm_crypt_io *base_io;
 } CRYPTO_MINALIGN_ATTR;

 struct dm_crypt_request {
@@ -172,7 +171,6 @@ struct crypt_config {
 };

 #define MIN_IOS        16
-#define MIN_POOL_PAGES 32

 static struct kmem_cache *_crypt_io_pool;

@@ -946,56 +944,41 @@ static int crypt_convert(struct crypt_config *cc,
 	return 0;
 }

+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-				      unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned i, len;
 	struct page *page;
+	struct bio_vec *bvec;

 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
 		return NULL;

 	clone_init(io, clone);
-	*out_of_pages = 0;

 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page) {
-			*out_of_pages = 1;
-			break;
-		}
-
-		/*
-		 * If additional pages cannot be allocated without waiting,
-		 * return a partially-allocated bio. The caller will then try
-		 * to allocate more bios while submitting this partial bio.
-		 */
-		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

 		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

-		if (!bio_add_page(clone, page, len, 0)) {
-			mempool_free(page, cc->page_pool);
-			break;
-		}
+		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+		bvec->bv_page = page;
+		bvec->bv_len = len;
+		bvec->bv_offset = 0;

-		size -= len;
-	}
+		clone->bi_iter.bi_size += len;

-	if (!clone->bi_iter.bi_size) {
-		bio_put(clone);
-		return NULL;
+		size -= len;
 	}

 	return clone;
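
The rewritten allocator sizes the bio for the whole request up front: nr_iovecs is a ceiling division of the request size by the page size, each vector takes a full page, and the last one takes whatever remains. A minimal userspace sketch of that arithmetic, assuming a 4 KiB page size purely for illustration (the EX_ names are not kernel macros):

/*
 * Standalone illustration of the sizing arithmetic used in
 * crypt_alloc_buffer() above; builds as an ordinary userspace program.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1U << EX_PAGE_SHIFT)	/* assumed 4 KiB pages */

int main(void)
{
	unsigned size = 9216;	/* e.g. an 18-sector (9 KiB) write request */
	unsigned nr_iovecs = (size + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
	unsigned i, len;

	printf("%u bytes -> %u page vectors\n", size, nr_iovecs);

	/* Same loop shape as above: full pages, then the tail. */
	for (i = 0; i < nr_iovecs; i++) {
		len = (size > EX_PAGE_SIZE) ? EX_PAGE_SIZE : size;
		printf("vec %u: %u bytes\n", i, len);
		size -= len;
	}
	return 0;
}

For 9 KiB this prints three vectors of 4096, 4096 and 1024 bytes, which is exactly how the loop above fills bi_io_vec.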
@@ -1020,7 +1003,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
-	io->base_io = NULL;
 	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 }
@@ -1033,13 +1015,11 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;

 	if (!atomic_dec_and_test(&io->io_pending))
@@ -1050,13 +1030,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
 		mempool_free(io, cc->io_pool);

-	if (likely(!base_io))
-		bio_endio(base_bio, error);
-	else {
-		if (error && !base_io->error)
-			base_io->error = error;
-		crypt_dec_pending(base_io);
-	}
+	bio_endio(base_bio, error);
 }

 /*
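
With base_io gone, completion is driven solely by the per-request io_pending count: whoever drops the last reference calls bio_endio() on the base bio. The pattern is a plain reference-counted completion; a hedged userspace sketch of it using C11 atomics (the example_ names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct example_io {
	atomic_int pending;	/* plays the role of io_pending */
	int error;
};

static void example_endio(struct example_io *io)
{
	printf("request complete, error=%d\n", io->error);
}

static void example_inc_pending(struct example_io *io)
{
	atomic_fetch_add(&io->pending, 1);
}

static void example_dec_pending(struct example_io *io)
{
	/* The last dropper sees the count reach zero and completes. */
	if (atomic_fetch_sub(&io->pending, 1) == 1)
		example_endio(io);
}

int main(void)
{
	struct example_io io;

	io.error = 0;
	atomic_init(&io.pending, 1);	/* submitter's initial reference */
	example_inc_pending(&io);	/* e.g. taken before async crypto is queued */
	example_dec_pending(&io);	/* async work finishes */
	example_dec_pending(&io);	/* submitter drops its reference: completes here */
	return 0;
}

The kernel's atomic_dec_and_test() plays the same role as the fetch_sub() == 1 check in this sketch.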
@@ -1192,10 +1166,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
-	struct dm_crypt_io *new_io;
 	int crypt_finished;
-	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_iter.bi_size;
 	sector_t sector = io->sector;
 	int r;

@@ -1205,80 +1176,30 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	crypt_inc_pending(io);
 	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

-	/*
-	 * The allocated buffers can be smaller than the whole bio,
-	 * so repeat the whole process until all the data can be handled.
-	 */
-	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-		if (unlikely(!clone)) {
-			io->error = -ENOMEM;
-			break;
-		}
-
-		io->ctx.bio_out = clone;
-		io->ctx.iter_out = clone->bi_iter;
-
-		remaining -= clone->bi_iter.bi_size;
-		sector += bio_sectors(clone);
-
-		crypt_inc_pending(io);
-
-		r = crypt_convert(cc, &io->ctx);
-		if (r < 0)
-			io->error = -EIO;
-
-		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-		/* Encryption was already finished, submit io now */
-		if (crypt_finished) {
-			kcryptd_crypt_write_io_submit(io, 0);
-
-			/*
-			 * If there was an error, do not try next fragments.
-			 * For async, error is processed in async handler.
-			 */
-			if (unlikely(r < 0))
-				break;
+	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+	if (unlikely(!clone)) {
+		io->error = -EIO;
+		goto dec;
+	}

-			io->sector = sector;
-		}
+	io->ctx.bio_out = clone;
+	io->ctx.iter_out = clone->bi_iter;

-		/*
-		 * Out of memory -> run queues
-		 * But don't wait if split was due to the io size restriction
-		 */
-		if (unlikely(out_of_pages))
-			congestion_wait(BLK_RW_ASYNC, HZ/100);
+	sector += bio_sectors(clone);

-		/*
-		 * With async crypto it is unsafe to share the crypto context
-		 * between fragments, so switch to a new dm_crypt_io structure.
-		 */
-		if (unlikely(!crypt_finished && remaining)) {
-			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-			crypt_io_init(new_io, io->cc, io->base_bio, sector);
-			crypt_inc_pending(new_io);
-			crypt_convert_init(cc, &new_io->ctx, NULL,
-					   io->base_bio, sector);
-			new_io->ctx.iter_in = io->ctx.iter_in;
-
-			/*
-			 * Fragments after the first use the base_io
-			 * pending count.
-			 */
-			if (!io->base_io)
-				new_io->base_io = io;
-			else {
-				new_io->base_io = io->base_io;
-				crypt_inc_pending(io->base_io);
-				crypt_dec_pending(io);
-			}
+	crypt_inc_pending(io);
+	r = crypt_convert(cc, &io->ctx);
+	if (r)
+		io->error = -EIO;
+	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

-			io = new_io;
-		}
+	/* Encryption was already finished, submit io now */
+	if (crypt_finished) {
+		kcryptd_crypt_write_io_submit(io, 0);
+		io->sector = sector;
 	}

+dec:
 	crypt_dec_pending(io);
 }

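
One small detail of the new path: sector += bio_sectors(clone) advances the starting sector by the clone's size expressed in 512-byte sectors, i.e. bi_size shifted down by 9. A trivial sketch of that conversion, with made-up figures just for illustration:

#include <stdio.h>

#define EX_SECTOR_SHIFT 9	/* 512-byte sectors */

static unsigned long long bytes_to_sectors(unsigned long long bytes)
{
	return bytes >> EX_SECTOR_SHIFT;
}

int main(void)
{
	printf("64 KiB = %llu sectors\n", bytes_to_sectors(64 * 1024));	/* 128 */
	printf(" 9 KiB = %llu sectors\n", bytes_to_sectors(9 * 1024));	/*  18 */
	return 0;
}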
@@ -1746,7 +1667,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
 		      ARCH_KMALLOC_MINALIGN);

-	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
 		goto bad;
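
Because a write request is now backed by a single bio, the emergency page pool must be able to satisfy the largest possible bio on its own, hence the bump from MIN_POOL_PAGES to BIO_MAX_PAGES. As a rough sense of scale, assuming BIO_MAX_PAGES of 256 and 4 KiB pages (the common values in kernels of this era): the old reserve was 32 pages, or 128 KiB, while the new one is 256 pages, or 1 MiB per device.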