@@ -582,7 +582,7 @@ int dm_split_args(int *argc, char ***argvp, char *input)
582582static void dm_set_stacking_limits (struct queue_limits * limits )
583583{
584584 blk_set_stacking_limits (limits );
585- limits -> features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT ;
585+ limits -> features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL ;
586586}
587587
588588/*
@@ -1024,14 +1024,13 @@ bool dm_table_request_based(struct dm_table *t)
10241024 return __table_type_request_based (dm_table_get_type (t ));
10251025}
10261026
1027- static bool dm_table_supports_poll (struct dm_table * t );
1028-
10291027static int dm_table_alloc_md_mempools (struct dm_table * t , struct mapped_device * md )
10301028{
10311029 enum dm_queue_mode type = dm_table_get_type (t );
10321030 unsigned int per_io_data_size = 0 , front_pad , io_front_pad ;
10331031 unsigned int min_pool_size = 0 , pool_size ;
10341032 struct dm_md_mempools * pools ;
1033+ unsigned int bioset_flags = 0 ;
10351034
10361035 if (unlikely (type == DM_TYPE_NONE )) {
10371036 DMERR ("no table type is set, can't allocate mempools" );
@@ -1048,6 +1047,9 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
10481047 goto init_bs ;
10491048 }
10501049
1050+ if (md -> queue -> limits .features & BLK_FEAT_POLL )
1051+ bioset_flags |= BIOSET_PERCPU_CACHE ;
1052+
10511053 for (unsigned int i = 0 ; i < t -> num_targets ; i ++ ) {
10521054 struct dm_target * ti = dm_table_get_target (t , i );
10531055
@@ -1060,8 +1062,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
10601062
10611063 io_front_pad = roundup (per_io_data_size ,
10621064 __alignof__(struct dm_io )) + DM_IO_BIO_OFFSET ;
1063- if (bioset_init (& pools -> io_bs , pool_size , io_front_pad ,
1064- dm_table_supports_poll (t ) ? BIOSET_PERCPU_CACHE : 0 ))
1065+ if (bioset_init (& pools -> io_bs , pool_size , io_front_pad , bioset_flags ))
10651066 goto out_free_pools ;
10661067 if (t -> integrity_supported &&
10671068 bioset_integrity_create (& pools -> io_bs , pool_size ))
@@ -1404,14 +1405,6 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
14041405 return & t -> targets [(KEYS_PER_NODE * n ) + k ];
14051406}
14061407
1407- static int device_not_poll_capable (struct dm_target * ti , struct dm_dev * dev ,
1408- sector_t start , sector_t len , void * data )
1409- {
1410- struct request_queue * q = bdev_get_queue (dev -> bdev );
1411-
1412- return !test_bit (QUEUE_FLAG_POLL , & q -> queue_flags );
1413- }
1414-
14151408/*
14161409 * type->iterate_devices() should be called when the sanity check needs to
14171410 * iterate and check all underlying data devices. iterate_devices() will
@@ -1459,19 +1452,6 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
14591452 return 0 ;
14601453}
14611454
1462- static bool dm_table_supports_poll (struct dm_table * t )
1463- {
1464- for (unsigned int i = 0 ; i < t -> num_targets ; i ++ ) {
1465- struct dm_target * ti = dm_table_get_target (t , i );
1466-
1467- if (!ti -> type -> iterate_devices ||
1468- ti -> type -> iterate_devices (ti , device_not_poll_capable , NULL ))
1469- return false;
1470- }
1471-
1472- return true;
1473- }
1474-
14751455/*
14761456 * Check whether a table has no data devices attached using each
14771457 * target's iterate_devices method.
@@ -1817,6 +1797,13 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
18171797 if (!dm_table_supports_nowait (t ))
18181798 limits -> features &= ~BLK_FEAT_NOWAIT ;
18191799
1800+ /*
1801+ * The current polling implementation does not support request-based
1802+ * stacking.
1803+ */
1804+ if (!__table_type_bio_based (t -> type ))
1805+ limits -> features &= ~BLK_FEAT_POLL ;
1806+
18201807 if (!dm_table_supports_discards (t )) {
18211808 limits -> max_hw_discard_sectors = 0 ;
18221809 limits -> discard_granularity = 0 ;
@@ -1858,21 +1845,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
18581845 return r ;
18591846
18601847 dm_update_crypto_profile (q , t );
1861-
1862- /*
1863- * Check for request-based device is left to
1864- * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
1865- *
1866- * For bio-based device, only set QUEUE_FLAG_POLL when all
1867- * underlying devices supporting polling.
1868- */
1869- if (__table_type_bio_based (t -> type )) {
1870- if (dm_table_supports_poll (t ))
1871- blk_queue_flag_set (QUEUE_FLAG_POLL , q );
1872- else
1873- blk_queue_flag_clear (QUEUE_FLAG_POLL , q );
1874- }
1875-
18761848 return 0 ;
18771849}
18781850
0 commit comments