@@ -501,6 +501,7 @@ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lb
 
 	memset(&data_vio->record_name, 0, sizeof(data_vio->record_name));
 	memset(&data_vio->duplicate, 0, sizeof(data_vio->duplicate));
+	vdo_reset_completion(&data_vio->decrement_completion);
 	vdo_reset_completion(completion);
 	completion->error_handler = handle_data_vio_error;
 	set_data_vio_logical_callback(data_vio, attempt_logical_block_lock);
@@ -1273,12 +1274,14 @@ static void clean_hash_lock(struct vdo_completion *completion)
 static void finish_cleanup(struct data_vio *data_vio)
 {
 	struct vdo_completion *completion = &data_vio->vio.completion;
+	u32 discard_size = min_t(u32, data_vio->remaining_discard,
+				 VDO_BLOCK_SIZE - data_vio->offset);
 
 	VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
 			    "complete data_vio has no allocation lock");
 	VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
 			    "complete data_vio has no hash lock");
-	if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) ||
+	if ((data_vio->remaining_discard <= discard_size) ||
 	    (completion->result != VDO_SUCCESS)) {
 		struct data_vio_pool *pool = completion->vdo->data_vio_pool;
 
@@ -1287,12 +1290,12 @@ static void finish_cleanup(struct data_vio *data_vio)
 		return;
 	}
 
-	data_vio->remaining_discard -= min_t(u32, data_vio->remaining_discard,
-					     VDO_BLOCK_SIZE - data_vio->offset);
+	data_vio->remaining_discard -= discard_size;
 	data_vio->is_partial = (data_vio->remaining_discard < VDO_BLOCK_SIZE);
 	data_vio->read = data_vio->is_partial;
 	data_vio->offset = 0;
 	completion->requeue = true;
+	data_vio->first_reference_operation_complete = false;
 	launch_data_vio(data_vio, data_vio->logical.lbn + 1);
 }
 
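The per-block discard accounting introduced in finish_cleanup() above can be seen in isolation with a minimal, self-contained sketch. This is not VDO code: the 4096-byte BLOCK_SIZE and the starting values are assumptions chosen for illustration. It walks a discard forward one block at a time, limiting the leading chunk by the starting offset and stopping once the remaining bytes fit in the current block, mirroring the new discard_size logic.

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 4096u	/* assumed stand-in for VDO_BLOCK_SIZE */

int main(void)
{
	/* Hypothetical unaligned discard: three blocks' worth of data, starting 512 bytes in. */
	uint32_t remaining = 3 * BLOCK_SIZE;
	uint32_t offset = 512;

	for (;;) {
		/* Only the leading chunk is limited by the starting offset. */
		uint32_t chunk = (remaining < BLOCK_SIZE - offset) ?
				 remaining : BLOCK_SIZE - offset;

		printf("chunk=%u remaining=%u offset=%u\n", chunk, remaining, offset);

		if (remaining <= chunk)
			break;	/* final chunk: nothing is left after this block */

		remaining -= chunk;
		offset = 0;	/* subsequent chunks are block-aligned */
	}
	return 0;
}

With these assumed values the chunks come out as 3584, 4096, 4096, then a final 512-byte chunk, at which point the loop terminates, just as the relaunch loop above stops relaunching the data_vio.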
@@ -1965,7 +1968,8 @@ static void allocate_block(struct vdo_completion *completion)
 		.state = VDO_MAPPING_STATE_UNCOMPRESSED,
 	};
 
-	if (data_vio->fua) {
+	if (data_vio->fua ||
+	    data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
 		prepare_for_dedupe(data_vio);
 		return;
 	}
@@ -2042,7 +2046,6 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
 		return;
 	}
 
-
 	/*
 	 * We don't need to write any data, so skip allocation and just update the block map and
 	 * reference counts (via the journal).
@@ -2051,7 +2054,7 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
 	if (data_vio->is_zero)
 		data_vio->new_mapped.state = VDO_MAPPING_STATE_UNCOMPRESSED;
 
-	if (data_vio->remaining_discard > VDO_BLOCK_SIZE) {
+	if (data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
 		/* This is not the final block of a discard so we can't acknowledge it yet. */
 		update_metadata_for_data_vio_write(data_vio, NULL);
 		return;