From 5a5dbdd821047e217e31df24427e8ce0d896b1a1 Mon Sep 17 00:00:00 2001 From: Julien Jerphanion Date: Tue, 27 Feb 2024 17:20:17 +0100 Subject: [PATCH] maint: Remove usage of `folly::enumerate` Signed-off-by: Julien Jerphanion --- cpp/arcticdb/column_store/column.hpp | 1 - cpp/arcticdb/column_store/column_map.hpp | 7 +- .../column_store/memory_segment_impl.cpp | 52 ++++++------ .../column_store/memory_segment_impl.hpp | 9 ++- .../test/benchmark_memory_segment.cpp | 1 - .../column_store/test/test_memory_segment.cpp | 62 ++++++++------ cpp/arcticdb/pipeline/column_stats.cpp | 5 +- cpp/arcticdb/pipeline/frame_slice_map.hpp | 19 ++--- cpp/arcticdb/pipeline/test/test_pipeline.cpp | 6 +- cpp/arcticdb/processing/clause.cpp | 60 ++++++++------ cpp/arcticdb/processing/processing_unit.cpp | 6 +- cpp/arcticdb/processing/processing_unit.hpp | 12 ++- cpp/arcticdb/processing/test/test_clause.cpp | 27 ++++--- cpp/arcticdb/storage/azure/azure_storage.cpp | 4 +- cpp/arcticdb/storage/s3/detail-inl.hpp | 9 ++- cpp/arcticdb/storage/s3/s3_storage.hpp | 4 +- cpp/arcticdb/util/composite.hpp | 1 - .../version/local_versioned_engine.cpp | 80 ++++++++++--------- cpp/arcticdb/version/snapshot.cpp | 17 ++-- cpp/arcticdb/version/symbol_list.cpp | 3 +- .../version/test/test_version_store.cpp | 3 +- cpp/arcticdb/version/version_core.cpp | 24 +++--- .../version/version_map_batch_methods.cpp | 24 +++--- .../version/version_map_batch_methods.hpp | 7 +- cpp/arcticdb/version/version_store_api.cpp | 8 +- 25 files changed, 252 insertions(+), 199 deletions(-) diff --git a/cpp/arcticdb/column_store/column.hpp b/cpp/arcticdb/column_store/column.hpp index 3e456a6e520..040003bd250 100644 --- a/cpp/arcticdb/column_store/column.hpp +++ b/cpp/arcticdb/column_store/column.hpp @@ -18,7 +18,6 @@ #include #include -#include // Compilation fails on Mac if cstdio is not included prior to folly/Function.h due to a missing definition of memalign in folly/Memory.h #ifdef __APPLE__ #include diff --git a/cpp/arcticdb/column_store/column_map.hpp b/cpp/arcticdb/column_store/column_map.hpp index f046f81b9b4..17b2b80f70c 100644 --- a/cpp/arcticdb/column_store/column_map.hpp +++ b/cpp/arcticdb/column_store/column_map.hpp @@ -12,8 +12,6 @@ #include #include -#include - #ifdef ARCTICDB_USING_CONDA #include #else @@ -59,8 +57,9 @@ class ColumnMap { } void set_from_descriptor(const StreamDescriptor& descriptor) { - for(const auto& field : folly::enumerate(descriptor.fields())) { - insert(field->name(), field.index); + for(size_t i = 0; i < descriptor.fields().size(); i++) { + const auto& field = descriptor.fields()[i]; + insert(field.name(), i); } } diff --git a/cpp/arcticdb/column_store/memory_segment_impl.cpp b/cpp/arcticdb/column_store/memory_segment_impl.cpp index 4f036536649..4bf9225d034 100644 --- a/cpp/arcticdb/column_store/memory_segment_impl.cpp +++ b/cpp/arcticdb/column_store/memory_segment_impl.cpp @@ -188,17 +188,18 @@ std::shared_ptr SegmentInMemoryImpl::filter(const util::Bit // Index is built to make rank queries faster std::unique_ptr filter_idx; - for(const auto& column : folly::enumerate(columns())) { - (*column)->type().visit_tag([&] (auto type_desc_tag){ + for(size_t index = 0, columns_size = columns().size(); index < columns_size; index++) { + const auto& column = columns_[index]; + column->type().visit_tag([&] (auto type_desc_tag){ using TypeDescriptorTag = decltype(type_desc_tag); using DataTypeTag = typename TypeDescriptorTag::DataTypeTag; using RawType = typename DataTypeTag::raw_type; const util::BitSet* final_bitset; util::BitSet 
bitset_including_sparse; - auto sparse_map = (*column)->opt_sparse_map(); + auto sparse_map = column->opt_sparse_map(); std::unique_ptr sparse_idx; - auto output_col_idx = column.index; + auto output_col_idx = index; if (is_input_sparse || sparse_map) { filter_idx = std::make_unique(); filter_bitset.build_rs_index(filter_idx.get()); @@ -209,13 +210,13 @@ std::shared_ptr SegmentInMemoryImpl::filter(const util::Bit sparse_idx = std::make_unique(); sparse_map.value().build_rs_index(sparse_idx.get()); } else { - bitset_including_sparse.resize((*column)->row_count()); + bitset_including_sparse.resize(column->row_count()); } if (bitset_including_sparse.count() == 0) { // No values are set in the sparse column, skip it return; } - output_col_idx = output->add_column(field(column.index), bitset_including_sparse.count(), true); + output_col_idx = output->add_column(field(index), bitset_including_sparse.count(), true); final_bitset = &bitset_including_sparse; } else { final_bitset = &filter_bitset; @@ -224,7 +225,7 @@ std::shared_ptr SegmentInMemoryImpl::filter(const util::Bit if (sparse_map) output_col.opt_sparse_map() = std::make_optional(); auto output_ptr = reinterpret_cast(output_col.ptr()); - auto input_data = (*column)->data(); + auto input_data = column->data(); auto bitset_iter = final_bitset->first(); auto row_count_so_far = 0; @@ -356,11 +357,12 @@ std::vector> SegmentInMemoryImpl::partition return output; } - for (const auto& segment_count: folly::enumerate(segment_counts)) { - if (*segment_count > 0) { - auto& seg = output.at(segment_count.index); - seg = get_output_segment(*segment_count); - seg->set_row_data(*segment_count - 1); + for (size_t idx = 0, segment_counts_size = segment_counts.size(); idx < segment_counts_size; idx++) { + const auto& segment_count = segment_counts.at(idx); + if (segment_count > 0) { + auto& seg = output.at(idx); + seg = get_output_segment(segment_count); + seg->set_row_data(segment_count - 1); seg->set_string_pool(string_pool_); seg->set_compacted(compacted_); if (metadata_) { @@ -371,21 +373,23 @@ std::vector> SegmentInMemoryImpl::partition } } - for(const auto& column : folly::enumerate(columns())) { - (*column)->type().visit_tag([&] (auto type_desc_tag){ + for(size_t idx = 0, columns_size = columns().size(); idx < columns_size; idx++) { + const auto& column = columns_[idx]; + column->type().visit_tag([&] (auto type_desc_tag){ using TypeDescriptorTag = decltype(type_desc_tag); using ColumnTagType = typename TypeDescriptorTag::DataTypeTag; using RawType = typename ColumnTagType::raw_type; - auto output_col_idx = column.index; + auto output_col_idx = idx; std::vector output_ptrs{output.size(), nullptr}; - for (const auto& segment: folly::enumerate(output)) { - if (static_cast(*segment)) { - output_ptrs.at(segment.index) = reinterpret_cast((*segment)->column(output_col_idx).ptr()); + for (size_t seg_idx = 0; seg_idx < output.size(); seg_idx++) { + const auto& segment = output.at(seg_idx); + if (static_cast(segment)) { + output_ptrs.at(seg_idx) = reinterpret_cast((segment)->column(output_col_idx).ptr()); } } - auto input_data = (*column)->data(); + auto input_data = column->data(); size_t overall_idx = 0; while(auto block = input_data.next()) { auto input_ptr = reinterpret_cast(block.value().data()); @@ -460,7 +464,8 @@ std::shared_ptr SegmentInMemoryImpl::truncate( output->set_metadata(std::move(metadata)); } - for (const auto&& [idx, column] : folly::enumerate(columns_)) { + for (size_t idx = 0, columns_size = columns_.size(); idx < columns_size; idx++) { + 
const auto& column = columns_[idx]; const TypeDescriptor column_type = column->type(); const Field& field = descriptor_->field(idx); std::shared_ptr<Column> truncated_column = Column::truncate(column, start_row, end_row); @@ -492,9 +497,10 @@ void SegmentInMemoryImpl::concatenate(SegmentInMemoryImpl&& other, bool unique_c row_count() == other.row_count(), "Cannot concatenate segments with differing row counts: {} {}", row_count(), other.row_count()); - for (const auto& field: folly::enumerate(other.fields())) { - if (!unique_column_names || !column_index(field->name()).has_value()) { - add_column(*field, other.column_ptr(field.index)); + for (size_t idx = 0, fields_size = other.fields().size(); idx < fields_size; idx++) { + const auto& field = other.fields()[idx]; + if (!unique_column_names || !column_index(field.name()).has_value()) { + add_column(field, other.column_ptr(idx)); } } } diff --git a/cpp/arcticdb/column_store/memory_segment_impl.hpp b/cpp/arcticdb/column_store/memory_segment_impl.hpp index b09b726d569..31f3e99edcf 100644 --- a/cpp/arcticdb/column_store/memory_segment_impl.hpp +++ b/cpp/arcticdb/column_store/memory_segment_impl.hpp @@ -20,7 +20,6 @@ #include #include -#include <folly/container/Enumerate.h> namespace google::protobuf { @@ -438,11 +437,13 @@ class SegmentInMemoryImpl { } void push_back(const Row &row) { - for (auto it : folly::enumerate(row)) { - it->visit([&it, that=this](const auto &val) { + size_t index = 0; + for (auto it : row) { + it.visit([&index, that=this](const auto &val) { if(val) - that->set_scalar(it.index, val.value()); + that->set_scalar(index, val.value()); }); + index++; } end_row(); } diff --git a/cpp/arcticdb/column_store/test/benchmark_memory_segment.cpp b/cpp/arcticdb/column_store/test/benchmark_memory_segment.cpp index 69613fc011f..b7dc12ad511 100644 --- a/cpp/arcticdb/column_store/test/benchmark_memory_segment.cpp +++ b/cpp/arcticdb/column_store/test/benchmark_memory_segment.cpp @@ -9,7 +9,6 @@ #include #include -#include <folly/container/Enumerate.h> #include diff --git a/cpp/arcticdb/column_store/test/test_memory_segment.cpp b/cpp/arcticdb/column_store/test/test_memory_segment.cpp index 3416242e75c..84f5ba58cc4 100644 --- a/cpp/arcticdb/column_store/test/test_memory_segment.cpp +++ b/cpp/arcticdb/column_store/test/test_memory_segment.cpp @@ -107,23 +107,27 @@ TEST(MemSegment, IterateAndGetValues) { auto frame_wrapper = get_test_timeseries_frame("test_get_values", 100, 0); auto& segment = frame_wrapper.segment_; - for( auto row : folly::enumerate(segment)) { - for(auto value : folly::enumerate(*row)) { - value->visit([&] (const auto& val) { + size_t row_index = 0; + for(auto row : segment) { + size_t value_index = 0; + for(auto value : row) { + value.visit([&] (const auto& val) { using ValType = std::decay_t<decltype(val)>; - if( value.index == 0) { - ASSERT_EQ(static_cast(row.index), val); + if(value_index == 0) { + ASSERT_EQ(static_cast(row_index), val); } else { if constexpr(std::is_integral_v<ValType>) { - ASSERT_EQ(val, get_integral_value_for_offset(0, row.index)); + ASSERT_EQ(val, get_integral_value_for_offset(0, row_index)); } if constexpr (std::is_floating_point_v<ValType>) { - ASSERT_EQ(val, get_floating_point_value_for_offset(0, row.index)); + ASSERT_EQ(val, get_floating_point_value_for_offset(0, row_index)); } } }); + value_index++; } + row_index++; } } @@ -136,12 +140,14 @@ TEST(MemSegment, IterateWithEmptyTypeColumn) { auto empty_column = std::make_shared<Column>(generate_empty_column()); seg.add_column(scalar_field(empty_column->type().data_type(), "empty_column"), empty_column); seg.set_row_id(num_rows - 1); - for (auto&& [idx, row]: 
folly::enumerate(seg)) { + auto idx = 0; + for (auto&& row: seg) { ASSERT_EQ(static_cast(idx), row.scalar_at(0)); // Exception should be thrown regardless of the type requested for empty type columns EXPECT_THROW([[maybe_unused]] auto v = row.scalar_at(1).has_value(), InternalException); EXPECT_THROW([[maybe_unused]] auto v = row.scalar_at(1).has_value(), InternalException); EXPECT_THROW([[maybe_unused]] auto v = row.scalar_at(1).has_value(), InternalException); + ++idx; } } @@ -151,23 +157,27 @@ TEST(MemSegment, CopyViaIterator) { auto target = get_test_empty_timeseries_segment("to_sort", 0u); std::copy(std::begin(source), std::end(source), std::back_inserter(target)); - for( auto row : folly::enumerate(target)) { - for(auto value : folly::enumerate(*row)) { - value->visit([&] (const auto& val) { + size_t row_index = 0; + for(auto row : target) { + size_t value_index = 0; + for(auto value : row) { + value.visit([&] (const auto& val) { using ValType = std::decay_t; - if( value.index == 0) { - ASSERT_EQ(static_cast(row.index), val); + if(value_index == 0) { + ASSERT_EQ(static_cast(row_index), val); } else { if constexpr(std::is_integral_v) { - ASSERT_EQ(val, get_integral_value_for_offset(0, row.index)); + ASSERT_EQ(val, get_integral_value_for_offset(0, row_index)); } if constexpr (std::is_floating_point_v) { - ASSERT_EQ(val, get_floating_point_value_for_offset(0, row.index)); + ASSERT_EQ(val, get_floating_point_value_for_offset(0, row_index)); } } }); + value_index++; } + row_index++; } } @@ -187,22 +197,26 @@ TEST(MemSegment, ModifyViaIterator) { } } - for (auto row : folly::enumerate(segment)) { - for (auto value : folly::enumerate(*row)) { - value->visit([&](const auto &val) { + size_t row_index = 0; + for (auto row : segment) { + size_t value_index = 0; + for (auto value : row) { + value.visit([&](const auto &val) { using ValType = std::decay_t; - if (value.index == 0) { - ASSERT_EQ(static_cast(row.index + 1), val); + if (value_index == 0) { + ASSERT_EQ(static_cast(row_index + 1), val); } else { if constexpr(std::is_integral_v) { - ASSERT_EQ(val, get_integral_value_for_offset(0, row.index) + 1); + ASSERT_EQ(val, get_integral_value_for_offset(0, row_index) + 1); } if constexpr (std::is_floating_point_v) { - ASSERT_EQ(val, get_floating_point_value_for_offset(0, row.index) + 1); + ASSERT_EQ(val, get_floating_point_value_for_offset(0, row_index) + 1); } } }); + value_index++; } + row_index++; } } @@ -483,12 +497,14 @@ TEST(MemSegment, Filter) { auto filtered_seg = seg.filter(filter_bitset); - for (auto&& [idx, row]: folly::enumerate(filtered_seg)) { + size_t idx = 0; + for (auto row: filtered_seg) { ASSERT_EQ(static_cast(retained_rows[idx]), row.scalar_at(0)); // Exception should be thrown regardless of the type requested for empty type columns EXPECT_THROW([[maybe_unused]] auto v = row.scalar_at(1).has_value(), InternalException); EXPECT_THROW([[maybe_unused]] auto v = row.scalar_at(1).has_value(), InternalException); EXPECT_THROW([[maybe_unused]] auto v = row.scalar_at(1).has_value(), InternalException); + ++idx; } } diff --git a/cpp/arcticdb/pipeline/column_stats.cpp b/cpp/arcticdb/pipeline/column_stats.cpp index 58845b1d8da..6165745ef44 100644 --- a/cpp/arcticdb/pipeline/column_stats.cpp +++ b/cpp/arcticdb/pipeline/column_stats.cpp @@ -36,8 +36,9 @@ SegmentInMemory merge_column_stats_segments(const std::vector& } } } - for (const auto& type_descriptor: folly::enumerate(type_descriptors)) { - merged.add_column(FieldRef{*type_descriptor, field_names.at(type_descriptor.index)}, 0, false); 
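// The mechanical pattern behind every hunk in this patch, shown in isolation:
// folly::enumerate wraps an iterator so that `it.index` carries the position while
// `*it`/`it->` reach the element; an explicit counter loop recovers both without the
// folly dependency. A minimal sketch of the rewrite (illustrative names, not ArcticDB APIs):
#include <cstddef>
#include <string>
#include <vector>

void index_names(const std::vector<std::string>& names) {
    // before: for (const auto& name : folly::enumerate(names)) use(*name, name.index);
    for (size_t i = 0, n = names.size(); i < n; ++i) {
        const std::string& name = names[i];  // the element, formerly *it / it->
        (void)name; (void)i;                 // i stands in for it.index
    }
}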
+ for (size_t index = 0; index < type_descriptors.size(); index++) { + auto &type_descriptor = type_descriptors.at(index); + merged.add_column(FieldRef{type_descriptor, field_names.at(index)}, 0, false); } for (auto &segment : segments) { merged.append(segment); diff --git a/cpp/arcticdb/pipeline/frame_slice_map.hpp b/cpp/arcticdb/pipeline/frame_slice_map.hpp index 95796942155..c5a3326c6f4 100644 --- a/cpp/arcticdb/pipeline/frame_slice_map.hpp +++ b/cpp/arcticdb/pipeline/frame_slice_map.hpp @@ -27,13 +27,14 @@ struct FrameSliceMap { const auto& row_range = context_row.slice_and_key().slice_.row_range; const auto& fields = context_row.descriptor().fields(); - for(const auto& field : folly::enumerate(fields)) { - if (!context_->is_in_filter_columns_set(field->name())) { - ARCTICDB_DEBUG(log::version(), "{} not present in filtered columns, skipping", field->name()); + for(size_t i = 0; i < fields.size(); i++) { + const auto& field = fields[i]; + if (!context_->is_in_filter_columns_set(field.name())) { + ARCTICDB_DEBUG(log::version(), "{} not present in filtered columns, skipping", field.name()); continue; } - const entity::DataType row_range_type = field->type().data_type(); + const entity::DataType row_range_type = field.type().data_type(); if(!dynamic_schema && !is_sequence_type(row_range_type)) { // In case we end up with static schema and empty we must check the type of the whole column // Because we could be reading an empty segment of a string column. Example: start with [None], @@ -43,21 +44,21 @@ struct FrameSliceMap { // TODO: This logic won't be needed when we move string handling into separate type handler if(is_empty_type(row_range_type)) { const entity::StreamDescriptor& descriptor = context_->descriptor(); - const size_t global_field_idx = descriptor.find_field(field->name()).value(); + const size_t global_field_idx = descriptor.find_field(field.name()).value(); const Field& global_field = descriptor.field(global_field_idx); const entity::DataType global_field_type = global_field.type().data_type(); if(!is_sequence_type(global_field_type)) { - ARCTICDB_DEBUG(log::version(), "{} not a string type in dynamic schema, skipping", field->name()); + ARCTICDB_DEBUG(log::version(), "{} not a string type in dynamic schema, skipping", field.name()); continue; } } else { - ARCTICDB_DEBUG(log::version(), "{} not a string type in dynamic schema, skipping", field->name()); + ARCTICDB_DEBUG(log::version(), "{} not a string type in dynamic schema, skipping", field.name()); continue; } } - auto& column = columns_[field->name()]; - ContextData data{context_row.index_, field.index}; + auto& column = columns_[field.name()]; + ContextData data{context_row.index_, i}; column.insert(std::make_pair(row_range, data)); } } diff --git a/cpp/arcticdb/pipeline/test/test_pipeline.cpp b/cpp/arcticdb/pipeline/test/test_pipeline.cpp index 0c263c068db..31ea47e7a72 100644 --- a/cpp/arcticdb/pipeline/test/test_pipeline.cpp +++ b/cpp/arcticdb/pipeline/test/test_pipeline.cpp @@ -99,8 +99,10 @@ struct TestProjection { desc.add_field(fd); auto col_index = segment.add_column(fd, 0, false); auto& column = segment.column(col_index); - for(auto&& row : folly::enumerate(segment)) { - column.set_scalar(row.index, projection_func_(*row)); + size_t row_index = 0; + for(const auto& row : segment) { + column.set_scalar(row_index, projection_func_(row)); + ++row_index; } return segment; } diff --git a/cpp/arcticdb/processing/clause.cpp b/cpp/arcticdb/processing/clause.cpp index d7fddd97a55..1d7d0eb83f5 100644 --- 
a/cpp/arcticdb/processing/clause.cpp +++ b/cpp/arcticdb/processing/clause.cpp @@ -38,7 +38,8 @@ std::vector> structure_by_row_slice(std::vector> res; RowRange previous_row_range; - for (const auto& [idx, ranges_and_key]: folly::enumerate(ranges_and_keys)) { + for (size_t idx = 0, ranges_and_keys_size = ranges_and_keys.size(); idx < ranges_and_keys_size; idx++) { + const auto& ranges_and_key = ranges_and_keys[idx]; RowRange current_row_range{ranges_and_key.row_range_}; if (current_row_range != previous_row_range) { res.emplace_back(); @@ -55,7 +56,8 @@ std::vector> structure_by_column_slice(std::vector> res; ColRange previous_col_range; - for (const auto& [idx, ranges_and_key]: folly::enumerate(ranges_and_keys)) { + for (size_t idx = 0, ranges_and_keys_size = ranges_and_keys.size(); idx < ranges_and_keys_size; idx++) { + const auto& ranges_and_key = ranges_and_keys[idx]; ColRange current_col_range{ranges_and_key.col_range_}; if (current_col_range != previous_col_range) { res.emplace_back(); @@ -139,8 +141,8 @@ EntityIds push_entities(std::shared_ptr component_manager, Pro } if (proc.row_ranges_.has_value()) { if (res.has_value()) { - for (const auto& [idx, row_range]: folly::enumerate(*proc.row_ranges_)) { - component_manager->add(row_range, res->at(idx)); + for (size_t idx = 0; idx < proc.row_ranges_->size(); idx++) { + component_manager->add(proc.row_ranges_->at(idx), res->at(idx)); } } else { res = std::make_optional(); @@ -151,8 +153,8 @@ EntityIds push_entities(std::shared_ptr component_manager, Pro } if (proc.col_ranges_.has_value()) { if (res.has_value()) { - for (const auto& [idx, col_range]: folly::enumerate(*proc.col_ranges_)) { - component_manager->add(col_range, res->at(idx)); + for (size_t idx = 0; idx < proc.col_ranges_->size(); idx++) { + component_manager->add(proc.col_ranges_->at(idx), res->at(idx)); } } else { res = std::make_optional(); @@ -163,8 +165,8 @@ EntityIds push_entities(std::shared_ptr component_manager, Pro } if (proc.atom_keys_.has_value()) { if (res.has_value()) { - for (const auto& [idx, atom_key]: folly::enumerate(*proc.atom_keys_)) { - component_manager->add(atom_key, res->at(idx)); + for (size_t idx = 0; idx < proc.atom_keys_->size(); idx++) { + component_manager->add(proc.atom_keys_->at(idx), res->at(idx)); } } else { res = std::make_optional(); @@ -379,16 +381,17 @@ Composite AggregationClause::process(Composite&& entity_id // Work out the common type between the processing units for the columns being aggregated for (auto& proc: procs_as_range) { - for (auto agg_data: folly::enumerate(aggregators_data)) { + for (size_t index = 0, size = aggregators_data.size(); index < size ; index++) { + auto& agg_data = aggregators_data.at(index); // Check that segments row ranges are the same internal::check( std::all_of(proc.row_ranges_->begin(), proc.row_ranges_->end(), [&] (const auto& row_range) {return row_range->start() == proc.row_ranges_->at(0)->start();}), "Expected all data segments in one processing unit to have the same row ranges"); - auto input_column_name = aggregators_.at(agg_data.index).get_input_column_name(); + auto input_column_name = aggregators_.at(index).get_input_column_name(); auto input_column = proc.get(input_column_name); if (std::holds_alternative(input_column)) { - agg_data->add_data_type(std::get(input_column).column_->type().data_type()); + agg_data.add_data_type(std::get(input_column).column_->type().data_type()); } } } @@ -487,8 +490,8 @@ Composite AggregationClause::process(Composite&& entity_id num_unique = next_group_id; 
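// An aside on a possible future direction (an assumption, not part of this patch):
// if the toolchain ever moves to C++23, std::views::enumerate restores the
// index-plus-element ergonomics with no third-party dependency:
#include <ranges>
#include <vector>

void visit_all(const std::vector<int>& xs) {
    for (auto&& [i, x] : std::views::enumerate(xs)) {
        // i is the position (a signed difference type), x the element,
        // mirroring it.index / *it from folly::enumerate
        (void)i; (void)x;
    }
}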
util::check(num_unique != 0, "Got zero unique values"); - for (auto agg_data: folly::enumerate(aggregators_data)) { - auto input_column_name = aggregators_.at(agg_data.index).get_input_column_name(); + for (size_t i = 0; i < aggregators_data.size(); i++) { + auto input_column_name = aggregators_.at(i).get_input_column_name(); auto input_column = proc_.get(input_column_name); std::optional opt_input_column; if (std::holds_alternative(input_column)) { @@ -498,7 +501,7 @@ Composite AggregationClause::process(Composite&& entity_id opt_input_column.emplace(std::move(column_with_strings)); } } - agg_data->aggregate(opt_input_column, row_to_group, num_unique); + aggregators_data.at(i).aggregate(opt_input_column, row_to_group, num_unique); } }); } else { @@ -531,10 +534,10 @@ Composite AggregationClause::process(Composite&& entity_id }); index_col->set_row_data(grouping_map.size() - 1); - for (auto agg_data: folly::enumerate(aggregators_data)) { - seg.concatenate(agg_data->finalize(aggregators_.at(agg_data.index).get_output_column_name(), processing_config_.dynamic_schema_, num_unique)); + for (size_t i = 0; i < aggregators_data.size(); i++) { + auto& agg_data = aggregators_data.at(i); + seg.concatenate(agg_data.finalize(aggregators_.at(i).get_output_column_name(), processing_config_.dynamic_schema_, num_unique)); } - seg.set_string_pool(string_pool); seg.set_row_id(num_unique - 1); return Composite(push_entities(component_manager_, ProcessingUnit(std::move(seg)))); @@ -553,7 +556,8 @@ Composite AggregationClause::process(Composite&& entity_id size_t min_start_col = std::numeric_limits::max(); size_t max_end_col = 0; std::optional output_seg; - for (auto&& [idx, segment]: folly::enumerate(proc.segments_.value())) { + for (size_t idx = 0; idx < proc.segments_->size(); idx++) { + auto& segment = proc.segments_->at(idx); min_start_row = std::min(min_start_row, proc.row_ranges_->at(idx)->start()); max_end_row = std::max(max_end_row, proc.row_ranges_->at(idx)->end()); min_start_col = std::min(min_start_col, proc.col_ranges_->at(idx)->start()); @@ -579,7 +583,8 @@ Composite SplitClause::process(Composite&& entity_ids) con Composite ret; procs.broadcast([this, &ret](auto &&p) { auto proc = std::forward(p); - for (auto&& [idx, seg]: folly::enumerate(proc.segments_.value())) { + for (size_t idx = 0; idx < proc.segments_->size(); idx++) { + auto& seg = proc.segments_->at(idx); auto split_segs = seg->split(rows_); size_t start_row = proc.row_ranges_->at(idx)->start(); size_t end_row = 0; @@ -668,7 +673,8 @@ Composite MergeClause::process(Composite&& entity_ids) con size_t min_start_col = std::numeric_limits::max(); size_t max_end_col = 0; procs.broadcast([&input_streams, &min_start_row, &max_end_row, &min_start_col, &max_end_col](auto&& proc) { - for (auto&& [idx, segment]: folly::enumerate(proc.segments_.value())) { + for (size_t idx = 0; idx < proc.segments_->size(); idx++) { + auto& segment = proc.segments_->at(idx); size_t start_row = proc.row_ranges_->at(idx)->start(); min_start_row = start_row < min_start_row ? 
start_row : min_start_row; size_t end_row = proc.row_ranges_->at(idx)->end(); @@ -728,12 +734,12 @@ Composite ColumnStatsGenerationClause::process(Composite&& start_indexes.insert(key->start_index()); end_indexes.insert(key->end_index()); } - for (auto agg_data : folly::enumerate(aggregators_data)) { - auto input_column_name = column_stats_aggregators_->at(agg_data.index).get_input_column_name(); + for (size_t i = 0; i < aggregators_data.size(); i++) { + auto input_column_name = column_stats_aggregators_->at(i).get_input_column_name(); auto input_column = proc.get(input_column_name); if (std::holds_alternative(input_column)) { auto input_column_with_strings = std::get(input_column); - agg_data->aggregate(input_column_with_strings); + aggregators_data.at(i).aggregate(input_column_with_strings); } else { if (!processing_config_.dynamic_schema_) internal::raise( @@ -763,8 +769,9 @@ Composite ColumnStatsGenerationClause::process(Composite&& seg.descriptor().set_index(IndexDescriptor(0, IndexDescriptor::ROWCOUNT)); seg.add_column(scalar_field(DataType::NANOSECONDS_UTC64, start_index_column_name), start_index_col); seg.add_column(scalar_field(DataType::NANOSECONDS_UTC64, end_index_column_name), end_index_col); - for (const auto& agg_data: folly::enumerate(aggregators_data)) { - seg.concatenate(agg_data->finalize(column_stats_aggregators_->at(agg_data.index).get_output_column_names())); + for (size_t i = 0; i < aggregators_data.size(); i++) { + auto& agg_data = aggregators_data.at(i); + seg.concatenate(agg_data.finalize(column_stats_aggregators_->at(i).get_output_column_names())); } seg.set_row_id(0); return Composite(push_entities(component_manager_, ProcessingUnit(std::move(seg)))); @@ -783,7 +790,8 @@ Composite RowRangeClause::process(Composite &&entity_ids) auto procs = gather_entities(component_manager_, std::move(entity_ids)); Composite output; procs.broadcast([&output, this](ProcessingUnit &proc) { - for (auto&& [idx, row_range]: folly::enumerate(proc.row_ranges_.value())) { + for (size_t idx = 0; idx < proc.row_ranges_->size(); idx++) { + auto row_range = proc.row_ranges_->at(idx); if ((start_ > row_range->start() && start_ < row_range->end()) || (end_ > row_range->start() && end_ < row_range->end())) { // Zero-indexed within this slice diff --git a/cpp/arcticdb/processing/processing_unit.cpp b/cpp/arcticdb/processing/processing_unit.cpp index f0f98c4eedc..95212a27e2c 100644 --- a/cpp/arcticdb/processing/processing_unit.cpp +++ b/cpp/arcticdb/processing/processing_unit.cpp @@ -16,7 +16,8 @@ void ProcessingUnit::apply_filter( "ProcessingUnit::apply_filter requires all of segments, row_ranges, and col_ranges to be present"); auto filter_down_stringpool = optimisation == PipelineOptimisation::MEMORY; - for (auto&& [idx, segment]: folly::enumerate(*segments_)) { + for (size_t idx = 0, segment_size = segments_.value().size(); idx < segment_size; idx++) { + auto&& segment = segments_->at(idx); auto seg = filter_segment(*segment, bitset, filter_down_stringpool); @@ -33,7 +34,8 @@ void ProcessingUnit::truncate(size_t start_row, size_t end_row) { internal::check(segments_.has_value() && row_ranges_.has_value() && col_ranges_.has_value(), "ProcessingUnit::truncate requires all of segments, row_ranges, and col_ranges to be present"); - for (auto&& [idx, segment]: folly::enumerate(*segments_)) { + for (size_t idx = 0, segment_size = segments_.value().size(); idx < segment_size; idx++) { + auto&& segment = segments_->at(idx); auto seg = segment->truncate(start_row, end_row, false); auto num_rows = 
seg.is_null() ? 0 : seg.row_count(); row_ranges_->at(idx) = std::make_shared(row_ranges_->at(idx)->first, row_ranges_->at(idx)->first + num_rows); diff --git a/cpp/arcticdb/processing/processing_unit.hpp b/cpp/arcticdb/processing/processing_unit.hpp index 2e7bb1beb16..2794fc0c106 100644 --- a/cpp/arcticdb/processing/processing_unit.hpp +++ b/cpp/arcticdb/processing/processing_unit.hpp @@ -115,7 +115,8 @@ namespace arcticdb { auto proc = std::forward(p); internal::check(proc.segments_.has_value() && proc.row_ranges_.has_value() && proc.col_ranges_.has_value(), "collect_segments requires all of segments, row_ranges, and col_ranges to be present"); - for (auto&& [idx, segment]: folly::enumerate(*proc.segments_)) { + for (size_t idx = 0; idx < proc.segments_.value().size(); idx++) { + auto segment = proc.segments_->at(idx); pipelines::FrameSlice frame_slice(*proc.col_ranges_->at(idx), *proc.row_ranges_->at(idx)); output.emplace_back(std::move(*segment), std::move(frame_slice)); } @@ -193,9 +194,11 @@ namespace arcticdb { std::vector procs{static_cast(num_buckets)}; BucketizerType bucketizer(num_buckets); auto [row_to_bucket, bucket_counts] = get_buckets(partitioning_column, grouper, bucketizer); - for (auto&& [input_idx, seg]: folly::enumerate(input.segments_.value())) { + for (size_t input_idx = 0; input_idx < input.segments_.value().size(); input_idx++) { + auto seg = input.segments_.value().at(input_idx); auto new_segs = partition_segment(*seg, row_to_bucket, bucket_counts); - for (auto && [output_idx, new_seg]: folly::enumerate(new_segs)) { + for (size_t output_idx = 0; output_idx < new_segs.size(); output_idx++) { + auto && new_seg = new_segs.at(output_idx); if (bucket_counts.at(output_idx) > 0) { if (!procs.at(output_idx).segments_.has_value()) { procs.at(output_idx).segments_ = std::make_optional>>(); @@ -208,7 +211,8 @@ namespace arcticdb { } } } - for (auto&& [idx, proc]: folly::enumerate(procs)) { + for (size_t idx = 0; idx < procs.size(); idx++) { + auto &proc = procs.at(idx); if (bucket_counts.at(idx) > 0) { proc.bucket_ = idx; output.push_back(std::move(proc)); diff --git a/cpp/arcticdb/processing/test/test_clause.cpp b/cpp/arcticdb/processing/test/test_clause.cpp index 8e2f96e87c8..7f1e33c79b3 100644 --- a/cpp/arcticdb/processing/test/test_clause.cpp +++ b/cpp/arcticdb/processing/test/test_clause.cpp @@ -60,8 +60,9 @@ TEST(Clause, Partition) { std::vector> tags = {{1, 3}, {2}}; std::array sizes = {180, 90}; - for (auto inner_seg : folly::enumerate(partitioned.as_range())){ - segment_scalar_assert_all_values_equal(*inner_seg, ColumnName("int8"), tags[inner_seg.index], sizes[inner_seg.index]); + auto inner_segs = partitioned.as_range(); + for (size_t i = 0; i < inner_segs.size(); i++) { + segment_scalar_assert_all_values_equal(inner_segs[i], ColumnName("int8"), tags[i], sizes[i]); } } @@ -81,8 +82,9 @@ TEST(Clause, PartitionString) { std::vector tags = {1, 3}; std::vector sizes = {120, 60}; - for (auto inner_seg : folly::enumerate(partitioned.as_range())){ - segment_string_assert_all_values_equal(*inner_seg, ColumnName("strings"), fmt::format("string_{}", tags[inner_seg.index]), sizes[inner_seg.index]); + auto inner_segs = partitioned.as_range(); + for (size_t i = 0; i < inner_segs.size(); i++) { + segment_string_assert_all_values_equal(inner_segs[i], ColumnName("strings"), fmt::format("string_{}", tags[i]), sizes[i]); } } @@ -423,20 +425,21 @@ TEST(Clause, Merge) { auto output_row = i % seg_size; const auto& expected_seg = copies[i % num_segs]; auto expected_row = i / num_segs; - 
for(auto field : folly::enumerate(output_seg.descriptor().fields())) { - if(field.index == 1) + for (size_t field_index = 0; field_index < output_seg.descriptor().fields().size(); ++field_index) { + if (field_index == 1) { continue; - - visit_field(*field, [&output_seg, &expected_seg, output_row, expected_row, &field] (auto tdt) { + } + const auto& field = output_seg.descriptor().fields()[field_index]; + visit_field(field, [&output_seg, &expected_seg, output_row, expected_row, field_index] (auto tdt) { using DataTypeTag = typename decltype(tdt)::DataTypeTag; if constexpr(is_sequence_type(DataTypeTag::data_type)) { - const auto val1 = output_seg.string_at(output_row, position_t(field.index)); - const auto val2 = expected_seg.string_at(expected_row, position_t(field.index)); + const auto val1 = output_seg.string_at(output_row, position_t(field_index)); + const auto val2 = expected_seg.string_at(expected_row, position_t(field_index)); ASSERT_EQ(val1, val2); } else { using RawType = typename decltype(tdt)::DataTypeTag::raw_type; - const auto val1 = output_seg.scalar_at(output_row, field.index); - const auto val2 = expected_seg.scalar_at(expected_row, field.index); + const auto val1 = output_seg.scalar_at(output_row, field_index); + const auto val2 = expected_seg.scalar_at(expected_row, field_index); ASSERT_EQ(val1, val2); } }); diff --git a/cpp/arcticdb/storage/azure/azure_storage.cpp b/cpp/arcticdb/storage/azure/azure_storage.cpp index 8f218bb8f41..fe3228e8822 100644 --- a/cpp/arcticdb/storage/azure/azure_storage.cpp +++ b/cpp/arcticdb/storage/azure/azure_storage.cpp @@ -216,8 +216,8 @@ void do_remove_impl(Composite&& ks, (fg::from(ks.as_range()) | fg::move | fg::groupBy(fmt_db)).foreach( [&root_folder, b=std::move(bucketizer), delete_object_limit=delete_object_limit, &batch_counter, &to_delete, &submit_batch] (auto&& group) {//bypass incorrect 'set but no used" error for delete_object_limit auto key_type_dir = key_type_folder(root_folder, group.key()); - for (auto k : folly::enumerate(group.values())) { - auto blob_name = object_path(b.bucketize(key_type_dir, *k), *k); + for (auto k : group.values()) { + auto blob_name = object_path(b.bucketize(key_type_dir, k), k); to_delete.emplace_back(std::move(blob_name)); if (++batch_counter == delete_object_limit) { submit_batch(to_delete); diff --git a/cpp/arcticdb/storage/s3/detail-inl.hpp b/cpp/arcticdb/storage/s3/detail-inl.hpp index 687886c47e9..faf5a557a2e 100644 --- a/cpp/arcticdb/storage/s3/detail-inl.hpp +++ b/cpp/arcticdb/storage/s3/detail-inl.hpp @@ -220,16 +220,17 @@ namespace s3 { [&s3_client, &root_folder, &bucket_name, &to_delete, b = std::move( bucketizer), &failed_deletes](auto &&group) { auto key_type_dir = key_type_folder(root_folder, group.key()); - for (auto k: folly::enumerate(group.values())) { - auto s3_object_name = object_path(b.bucketize(key_type_dir, *k), *k); + for (size_t i = 0; i < group.values().size(); i++) { + auto k = group.values()[i]; + auto s3_object_name = object_path(b.bucketize(key_type_dir, k), k); to_delete.emplace_back(std::move(s3_object_name)); - if (to_delete.size() == delete_object_limit || k.index + 1 == group.size()) { + if (to_delete.size() == delete_object_limit || i + 1 == group.size()) { auto delete_object_result = s3_client.delete_objects(to_delete, bucket_name); if (delete_object_result.is_success()) { ARCTICDB_RUNTIME_DEBUG(log::storage(), "Deleted {} objects, one of which with key '{}'", to_delete.size(), - variant_key_view(*k)); + variant_key_view(k)); for (auto& bad_key: 
delete_object_result.get_output().failed_deletes) { auto bad_key_name = bad_key.s3_object_name.substr(key_type_dir.size(), std::string::npos); diff --git a/cpp/arcticdb/storage/s3/s3_storage.hpp b/cpp/arcticdb/storage/s3/s3_storage.hpp index a2958c00177..a04345cb1c4 100644 --- a/cpp/arcticdb/storage/s3/s3_storage.hpp +++ b/cpp/arcticdb/storage/s3/s3_storage.hpp @@ -161,8 +161,8 @@ inline std::optional> parse_no_proxy_env_var(cons hosts.push_back(host); } Aws::Utils::Array non_proxy_hosts{hosts.size()}; - for (const auto& tmp: folly::enumerate(hosts)) { - non_proxy_hosts[tmp.index] = *tmp; + for (size_t i = 0; i < hosts.size(); i++) { + non_proxy_hosts[i] = hosts[i]; } return non_proxy_hosts; } diff --git a/cpp/arcticdb/util/composite.hpp b/cpp/arcticdb/util/composite.hpp index 226c974c422..91f3039b7eb 100644 --- a/cpp/arcticdb/util/composite.hpp +++ b/cpp/arcticdb/util/composite.hpp @@ -12,7 +12,6 @@ #include #include -#include #include #include diff --git a/cpp/arcticdb/version/local_versioned_engine.cpp b/cpp/arcticdb/version/local_versioned_engine.cpp index 3e91e25c492..59de5eb3085 100644 --- a/cpp/arcticdb/version/local_versioned_engine.cpp +++ b/cpp/arcticdb/version/local_versioned_engine.cpp @@ -447,14 +447,15 @@ std::vector> LocalVersionedEngine::batch auto version_futures = batch_get_versions_async(store(), version_map(), stream_ids, version_queries, read_options.read_previous_on_failure_); std::vector> descriptor_futures; - for (auto&& [idx, version_fut]: folly::enumerate(version_futures)) { + for (size_t idx = 0; idx < version_futures.size(); ++idx) { descriptor_futures.emplace_back( - get_descriptor_async(std::move(version_fut), stream_ids[idx], version_queries[idx])); + get_descriptor_async(std::move(version_futures[idx]), stream_ids[idx], version_queries[idx])); } auto descriptors = folly::collectAll(descriptor_futures).get(); std::vector> descriptors_or_errors; descriptors_or_errors.reserve(descriptors.size()); - for (auto&& [idx, descriptor]: folly::enumerate(descriptors)) { + for (size_t idx = 0; idx < descriptors.size(); ++idx) { + auto& descriptor = descriptors[idx]; if (descriptor.hasValue()) { descriptors_or_errors.emplace_back(std::move(descriptor.value())); } else { @@ -651,9 +652,9 @@ std::vector> LocalVersionedEngine::batch_ stream_ids); internal::check(stream_ids.size() == stream_update_info_futures.size(), "stream_ids and stream_update_info_futures must be of the same size"); std::vector> write_metadata_versions_futs; - for (const auto&& [idx, stream_update_info_fut] : folly::enumerate(stream_update_info_futures)) { + for (size_t idx = 0; idx < stream_update_info_futures.size(); ++idx) { write_metadata_versions_futs.push_back( - std::move(stream_update_info_fut) + std::move(stream_update_info_futures[idx]) .thenValue([this, user_meta_proto = std::move(user_meta_protos[idx]), &stream_id = stream_ids[idx]](auto&& update_info) mutable -> folly::Future { auto index_key_fut = folly::Future::makeEmpty(); if (update_info.previous_index_key_.has_value()) { @@ -681,7 +682,8 @@ std::vector> LocalVersionedEngine::batch_ auto write_metadata_versions = folly::collectAll(write_metadata_versions_futs).get(); std::vector> write_metadata_versions_or_errors; write_metadata_versions_or_errors.reserve(write_metadata_versions.size()); - for (auto&& [idx, write_metadata_version]: folly::enumerate(write_metadata_versions)) { + for (size_t idx = 0; idx < write_metadata_versions.size(); ++idx) { + auto& write_metadata_version = write_metadata_versions[idx]; if 
(write_metadata_version.hasValue()) { write_metadata_versions_or_errors.emplace_back(std::move(write_metadata_version.value())); } else { @@ -1121,9 +1123,9 @@ std::vector> LocalVersionedEngine::te auto versions = batch_get_versions_async(store(), version_map(), stream_ids, version_queries, read_options.read_previous_on_failure_); std::vector> read_versions_futs; - for (auto&& [idx, version] : folly::enumerate(versions)) { + for (size_t idx = 0; idx < versions.size(); ++idx) { auto read_query = read_queries.empty() ? ReadQuery{} : read_queries[idx]; - read_versions_futs.emplace_back(std::move(version) + read_versions_futs.emplace_back(std::move(versions[idx]) .thenValue([store = store()](auto&& maybe_index_key) { missing_data::check( maybe_index_key.has_value(), @@ -1146,7 +1148,8 @@ std::vector> LocalVersionedEngine::te auto read_versions = folly::collectAll(read_versions_futs).get(); std::vector> read_versions_or_errors; read_versions_or_errors.reserve(read_versions.size()); - for (auto&& [idx, read_version]: folly::enumerate(read_versions)) { + for (size_t idx = 0; idx < read_versions.size(); ++idx) { + auto& read_version = read_versions[idx]; if (read_version.hasValue()) { read_versions_or_errors.emplace_back(std::move(read_version.value())); } else { @@ -1251,9 +1254,9 @@ std::vector> LocalVersionedEngine::batch_write_versio if(prune_previous_versions) { std::vector> pruned_keys_futures; auto pruned_versions_futures = batch_write_and_prune_previous(store(), version_map(), index_keys, stream_update_info_vector); - for(auto&& [idx, pruned_version_fut] : folly::enumerate(pruned_versions_futures)) { + for (size_t idx = 0; idx < pruned_versions_futures.size(); ++idx) { pruned_keys_futures.push_back( - std::move(pruned_version_fut) + std::move(pruned_versions_futures[idx]) .thenValue([this, &index_key = index_keys[idx]](auto&& atom_key_vec){ return delete_unreferenced_pruned_indexes(std::move(atom_key_vec), index_key); }) @@ -1346,9 +1349,8 @@ std::vector> LocalVersionedEngine::batch_ stream_ids); internal::check(stream_ids.size() == update_info_futs.size(), "stream_ids and update_info_futs must be of the same size"); std::vector> version_futures; - for(auto&& update_info_fut : folly::enumerate(update_info_futs)) { - auto idx = update_info_fut.index; - version_futures.push_back(std::move(*update_info_fut) + for (size_t idx = 0; idx < update_info_futs.size(); ++idx) { + version_futures.push_back(std::move(update_info_futs[idx]) .thenValue([this, &stream_id = stream_ids[idx], &write_options](auto&& update_info){ return create_version_id_and_dedup_map(std::move(update_info), stream_id, write_options); }).via(&async::cpu_executor()) @@ -1379,7 +1381,8 @@ std::vector> LocalVersionedEngine::batch_ auto write_versions = folly::collectAll(version_futures).get(); std::vector> write_versions_or_errors; write_versions_or_errors.reserve(write_versions.size()); - for (auto&& [idx, write_version]: folly::enumerate(write_versions)) { + for (size_t idx = 0; idx < write_versions.size(); ++idx) { + auto&& write_version = write_versions[idx]; if (write_version.hasValue()) { write_versions_or_errors.emplace_back(std::move(write_version.value())); } else { @@ -1463,9 +1466,9 @@ std::vector> LocalVersionedEngine::batch_ stream_ids); std::vector> append_versions_futs; internal::check(stream_ids.size() == stream_update_info_futures.size(), "stream_ids and stream_update_info_futures must be of the same size"); - for (const auto&& [idx, stream_update_info_fut] : folly::enumerate(stream_update_info_futures)) { + for 
(size_t idx = 0; idx < stream_update_info_futures.size(); ++idx) { append_versions_futs.push_back( - std::move(stream_update_info_fut) + std::move(stream_update_info_futures[idx]) .thenValue([this, frame = std::move(frames[idx]), validate_index, stream_id = stream_ids[idx], upsert](auto&& update_info) mutable -> folly::Future { auto index_key_fut = folly::Future::makeEmpty(); auto write_options = get_write_options(); @@ -1494,7 +1497,8 @@ std::vector> LocalVersionedEngine::batch_ auto append_versions = folly::collectAll(append_versions_futs).get(); std::vector> append_versions_or_errors; append_versions_or_errors.reserve(append_versions.size()); - for (auto&& [idx, append_version]: folly::enumerate(append_versions)) { + for (size_t idx = 0; idx < append_versions.size(); ++idx) { + auto&& append_version = append_versions[idx]; if (append_version.hasValue()) { append_versions_or_errors.emplace_back(std::move(append_version.value())); } else { @@ -1533,12 +1537,12 @@ std::map get_sym_versions_from_query( const std::vector& version_queries) { std::map sym_versions; WarnVersionTypeNotHandled warner; - for(const auto& stream_id : folly::enumerate(stream_ids)) { - const auto& query = version_queries[stream_id.index].content_; + for(size_t i = 0; i < stream_ids.size(); i++) { + const auto& query = version_queries[i].content_; if(std::holds_alternative(query)) - sym_versions[*stream_id] = std::get(query).version_id_; - else - warner.warn(*stream_id); + sym_versions[stream_ids[i]] = std::get(query).version_id_; + else + warner.warn(stream_ids[i]); } return sym_versions; } @@ -1548,12 +1552,12 @@ std::map get_multiple_sym_versions_from_query( const std::vector& version_queries) { std::map sym_versions; WarnVersionTypeNotHandled warner; - for(const auto& stream_id : folly::enumerate(stream_ids)) { - const auto& query = version_queries[stream_id.index].content_; + for(size_t i = 0; i < stream_ids.size(); i++) { + const auto& query = version_queries[i].content_; if(std::holds_alternative(query)) - sym_versions[*stream_id].push_back(std::get(query).version_id_); + sym_versions[stream_ids[i]].push_back(std::get(query).version_id_); else - warner.warn(*stream_id); + warner.warn(stream_ids[i]); } return sym_versions; } @@ -1568,13 +1572,13 @@ std::vector> LocalVersionedEngine auto versions_to_restore = batch_get_specific_version(store(), version_map(), sym_versions); std::vector>> fut_vec; - for(const auto& stream_id : folly::enumerate(stream_ids)) { - auto prev = previous->find(*stream_id); + for(size_t i = 0; i < stream_ids.size(); i++) { + auto prev = previous->find(stream_ids[i]); auto maybe_prev = prev == std::end(*previous) ? 
std::nullopt : std::make_optional(to_atom(prev->second)); - auto version = versions_to_restore->find(*stream_id); - util::check(version != std::end(*versions_to_restore), "Did not find version for symbol {}", *stream_id); - fut_vec.emplace_back(async::submit_io_task(AsyncRestoreVersionTask{store(), version_map(), *stream_id, to_atom(version->second), maybe_prev})); + auto version = versions_to_restore->find(stream_ids[i]); + util::check(version != std::end(*versions_to_restore), "Did not find version for symbol {}", stream_ids[i]); + fut_vec.emplace_back(async::submit_io_task(AsyncRestoreVersionTask{store(), version_map(), stream_ids[i], to_atom(version->second), maybe_prev})); } auto output = folly::collect(fut_vec).get(); @@ -1601,9 +1605,8 @@ std::vector LocalVersionedEngine::batch_get_update_times( const std::vector& version_queries) { util::check(stream_ids.size() == version_queries.size(), "Symbol vs version query size mismatch: {} != {}", stream_ids.size(), version_queries.size()); std::vector results; - for(const auto& stream_id : folly::enumerate(stream_ids)) { - const auto& query = version_queries[stream_id.index]; - results.emplace_back(get_update_time_internal(*stream_id, query)); + for(size_t i = 0; i < stream_ids.size(); i++) { + results.emplace_back(get_update_time_internal(stream_ids[i], version_queries[i])); } return results; } @@ -1666,14 +1669,15 @@ std::vector>>> metadata_futures; - for (auto&& [idx, version]: folly::enumerate(version_futures)) { - metadata_futures.emplace_back(get_metadata_async(std::move(version), stream_ids[idx], version_queries[idx])); + for (size_t idx = 0; idx < version_futures.size(); ++idx) { + metadata_futures.emplace_back(get_metadata_async(std::move(version_futures[idx]), stream_ids[idx], version_queries[idx])); } auto metadatas = folly::collectAll(metadata_futures).get(); std::vector>, DataError>> metadatas_or_errors; metadatas_or_errors.reserve(metadatas.size()); - for (auto&& [idx, metadata]: folly::enumerate(metadatas)) { + for (size_t idx = 0; idx < metadatas.size(); ++idx) { + auto&& metadata = metadatas[idx]; if (metadata.hasValue()) { metadatas_or_errors.emplace_back(std::move(metadata.value())); } else { diff --git a/cpp/arcticdb/version/snapshot.cpp b/cpp/arcticdb/version/snapshot.cpp index 027d7b61145..cbcd9af8d9e 100644 --- a/cpp/arcticdb/version/snapshot.cpp +++ b/cpp/arcticdb/version/snapshot.cpp @@ -228,8 +228,9 @@ std::unordered_map> all_ref_keys( ) { std::unordered_map> output; output.reserve(snap_names.size()); - for(auto name : folly::enumerate(snap_names)) - output.try_emplace(*name, ref_keys[name.index]); + for (size_t i = 0; i < snap_names.size(); i++) { + output.try_emplace(snap_names[i], ref_keys[i]); + } return output; } @@ -241,9 +242,9 @@ std::unordered_map> get_snapshot_keys_via_ const std::shared_ptr& store ){ std::unordered_map> output; - for (auto snap : folly::enumerate(snap_names)) { - if (!ref_key_exists[snap.index]) - output.try_emplace(*snap, std::nullopt); + for (size_t i = 0; i < snap_names.size(); i++) { + if (!ref_key_exists[i]) + output.try_emplace(snap_names[i], std::nullopt); } store->iterate_type(KeyType::SNAPSHOT, [&output](VariantKey &&vk) { @@ -251,9 +252,9 @@ std::unordered_map> get_snapshot_keys_via_ it->second = std::move(vk); }); - for (auto snap : folly::enumerate(snap_names)) { - if (ref_key_exists[snap.index]) - output.try_emplace(*snap, ref_keys[snap.index]); + for (size_t i = 0; i < snap_names.size(); i++) { + if (ref_key_exists[i]) + output.try_emplace(snap_names[i], ref_keys[i]); } 
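// The snap_names/ref_keys loops above are all instances of one idiom: zipping two
// parallel vectors into a map keyed by shared index. A generic sketch with
// illustrative types (the real code maps snapshot names to optional VariantKeys):
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

std::unordered_map<std::string, int> zip_to_map(const std::vector<std::string>& keys,
                                                const std::vector<int>& values) {
    std::unordered_map<std::string, int> out;
    out.reserve(keys.size());
    for (size_t i = 0; i < keys.size(); ++i)
        out.try_emplace(keys[i], values[i]);  // keeps the first value seen for a key
    return out;
}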
return output; } diff --git a/cpp/arcticdb/version/symbol_list.cpp b/cpp/arcticdb/version/symbol_list.cpp index ee38323ed68..8bc0f1bd2d7 100644 --- a/cpp/arcticdb/version/symbol_list.cpp +++ b/cpp/arcticdb/version/symbol_list.cpp @@ -74,7 +74,8 @@ std::vector load_previous_from_version_keys( auto res = folly::collect(batch_get_latest_undeleted_and_latest_versions_async(store, data.version_map_, stream_ids)).get(); std::vector symbols; - for(auto&& [idx, opt_key_pair]: folly::enumerate(res)) { + for(size_t idx = 0, res_size = res.size(); idx < res_size; idx++) { + auto&& opt_key_pair = res[idx]; const auto& [maybe_undeleted, _] = opt_key_pair; if(maybe_undeleted) { const auto version_id = maybe_undeleted->version_id(); diff --git a/cpp/arcticdb/version/test/test_version_store.cpp b/cpp/arcticdb/version/test/test_version_store.cpp index 80158a5c09d..0a01d13f60d 100644 --- a/cpp/arcticdb/version/test/test_version_store.cpp +++ b/cpp/arcticdb/version/test/test_version_store.cpp @@ -346,7 +346,8 @@ TEST_F(VersionStoreTest, StressBatchReadUncompressed) { ReadOptions read_options; read_options.set_batch_throw_on_error(true); auto latest_versions = test_store_->batch_read(symbols, std::vector(10), read_queries, read_options); - for(auto&& [idx, version] : folly::enumerate(latest_versions)) { + for (size_t idx = 0; idx < latest_versions.size(); ++idx) { + auto& version = latest_versions[idx]; auto expected = get_test_simple_frame(std::get(version).item.symbol(), 10, idx); bool equal = expected.segment_ == std::get(version).frame_data.frame(); ASSERT_EQ(equal, true); diff --git a/cpp/arcticdb/version/version_core.cpp b/cpp/arcticdb/version/version_core.cpp index 8d3a71cded7..6cf5320268d 100644 --- a/cpp/arcticdb/version/version_core.cpp +++ b/cpp/arcticdb/version/version_core.cpp @@ -483,20 +483,20 @@ Composite process_clauses( &clauses, comp_entity_ids = std::move(comp_entity_ids)](std::vector&& segment_and_slices) mutable { auto entity_ids = std::get(comp_entity_ids[0]); - for (auto&& [idx, segment_and_slice]: folly::enumerate(segment_and_slices)) { + for (size_t idx = 0; idx < segment_and_slices.size(); idx++) { std::lock_guard lock(entity_added_mtx[entity_ids[idx]]); if (!entity_added[entity_ids[idx]]) { component_manager->add( - std::make_shared(std::move(segment_and_slice.segment_in_memory_)), + std::make_shared(std::move(segment_and_slices[idx].segment_in_memory_)), entity_ids[idx], segment_proc_unit_counts[entity_ids[idx]]); component_manager->add( - std::make_shared(std::move(segment_and_slice.ranges_and_key_.row_range_)), + std::make_shared(std::move(segment_and_slices[idx].ranges_and_key_.row_range_)), entity_ids[idx]); component_manager->add( - std::make_shared(std::move(segment_and_slice.ranges_and_key_.col_range_)), + std::make_shared(std::move(segment_and_slices[idx].ranges_and_key_.col_range_)), entity_ids[idx]); component_manager->add( - std::make_shared(std::move(segment_and_slice.ranges_and_key_.key_)), + std::make_shared(std::move(segment_and_slices[idx].ranges_and_key_.key_)), entity_ids[idx]); entity_added[entity_ids[idx]] = true; } @@ -901,11 +901,11 @@ void copy_frame_data_to_buffer(const SegmentInMemory& destination, size_t target } void copy_segments_to_frame(const std::shared_ptr& store, const std::shared_ptr& pipeline_context, const SegmentInMemory& frame) { - for (auto context_row : folly::enumerate(*pipeline_context)) { - auto& slice_and_key = context_row->slice_and_key(); + for (auto context_row : *pipeline_context) { + auto& slice_and_key = 
context_row.slice_and_key(); auto& segment = slice_and_key.segment(store); const auto index_field_count = get_index_field_count(frame); - for (auto idx = 0u; idx < index_field_count && context_row->fetch_index(); ++idx) { + for (auto idx = 0u; idx < index_field_count && context_row.fetch_index(); ++idx) { copy_frame_data_to_buffer(frame, idx, segment, idx, slice_and_key.slice_.row_range); } @@ -915,12 +915,12 @@ void copy_segments_to_frame(const std::shared_ptr& store, const std::shar "Column range does not match segment descriptor field count in copy_segments_to_frame: {} != {}", field_count, segment.descriptor().field_count()); for (auto field_col = index_field_count; field_col < field_count; ++field_col) { - const auto& field_name = context_row->descriptor().fields(field_col).name(); + const auto& field_name = context_row.descriptor().fields(field_col).name(); auto frame_loc_opt = frame.column_index(field_name); if (!frame_loc_opt) continue; - copy_frame_data_to_buffer(frame, *frame_loc_opt, segment, field_col, context_row->slice_and_key().slice_.row_range); + copy_frame_data_to_buffer(frame, *frame_loc_opt, segment, field_col, context_row.slice_and_key().slice_.row_range); } } } @@ -1179,8 +1179,8 @@ VersionedItem collate_and_write( for(auto sk = std::begin(pipeline_context->slice_and_keys_); sk < end; ++sk) writer.add(sk->key(), sk->slice()); - for (auto key : folly::enumerate(keys)) { - writer.add(to_atom(*key), slices[key.index]); + for (auto i = 0u; i < keys.size(); ++i) { + writer.add(to_atom(keys[i]), slices[i]); } auto index_key = writer.commit(); return VersionedItem{to_atom(std::move(index_key).get())}; diff --git a/cpp/arcticdb/version/version_map_batch_methods.cpp b/cpp/arcticdb/version/version_map_batch_methods.cpp index 206c2d191b9..77a5f92a245 100644 --- a/cpp/arcticdb/version/version_map_batch_methods.cpp +++ b/cpp/arcticdb/version/version_map_batch_methods.cpp @@ -260,14 +260,15 @@ std::vector>> batch_get_versions_async( version_queries.size()); robin_hood::unordered_flat_map version_data; - for (const auto &symbol : folly::enumerate(symbols)) { - auto it = version_data.find(*symbol); + for (size_t i = 0; i < symbols.size(); i++) { + auto symbol = symbols[i]; + auto it = version_data.find(symbol); if (it == version_data.end()) { version_data.insert(robin_hood::pair( - *symbol, - StreamVersionData{version_queries[symbol.index]})); + symbol, + StreamVersionData{version_queries[i]})); } else { - it->second.react(version_queries[symbol.index]); + it->second.react(version_queries[i]); } } @@ -279,8 +280,9 @@ std::vector>> batch_get_versions_async( std::vector>> output; output.reserve(symbols.size()); - for (const auto &symbol : folly::enumerate(symbols)) { - auto version_query = version_queries[symbol.index]; + for (size_t i = 0; i < symbols.size(); i++) { + auto symbol = symbols[i]; + auto version_query = version_queries[i]; auto version_entry_fut = folly::Future::makeEmpty(); util::variant_match(version_query.content_, [&version_entry_fut, &snapshot_count_map, &snapshot_key_map, &snapshot_futures, &store]( @@ -295,14 +297,14 @@ std::vector>> batch_get_versions_async( }, [&version_entry_fut, &version_data, &symbol, &version_futures, use_previous_on_error, &store, &version_map]( const auto &) { - const auto it = version_data.find(*symbol); - util::check(it != version_data.end(), "Missing version data for symbol {}", *symbol); + const auto it = version_data.find(symbol); + util::check(it != version_data.end(), "Missing version data for symbol {}", symbol); if 
(use_previous_on_error.value_or(false)) it->second.load_param_.use_previous_ = true; version_entry_fut = set_up_version_future( - *symbol, + symbol, it->second, version_futures, store, @@ -311,7 +313,7 @@ std::vector>> batch_get_versions_async( }); output.push_back(std::move(version_entry_fut) - .thenValue([vq = version_query, sid = *symbol](auto version_or_snapshot) { + .thenValue([vq = version_query, sid = symbol](auto version_or_snapshot) { return util::variant_match(version_or_snapshot, [&vq](const std::shared_ptr &version_map_entry) { return get_key_for_version_query(version_map_entry, vq); diff --git a/cpp/arcticdb/version/version_map_batch_methods.hpp b/cpp/arcticdb/version/version_map_batch_methods.hpp index bbda0bffb66..2d17b4b3285 100644 --- a/cpp/arcticdb/version/version_map_batch_methods.hpp +++ b/cpp/arcticdb/version/version_map_batch_methods.hpp @@ -278,9 +278,10 @@ inline std::vector>> batch_write_and_prune_pr const std::vector& stream_update_info_vector) { std::vector>> results; results.reserve(keys.size()); - for(auto key : folly::enumerate(keys)){ - auto previous_index_key = stream_update_info_vector[key.index].previous_index_key_; - results.emplace_back(async::submit_io_task(WriteAndPrunePreviousTask{store, version_map, *key, previous_index_key})); + for(size_t i = 0; i < keys.size(); i++){ + const auto& key = keys[i]; + auto previous_index_key = stream_update_info_vector[i].previous_index_key_; + results.emplace_back(async::submit_io_task(WriteAndPrunePreviousTask{store, version_map, key, previous_index_key})); } return results; diff --git a/cpp/arcticdb/version/version_store_api.cpp b/cpp/arcticdb/version/version_store_api.cpp index 505bfba0bd4..57c1b38129d 100644 --- a/cpp/arcticdb/version/version_store_api.cpp +++ b/cpp/arcticdb/version/version_store_api.cpp @@ -89,8 +89,9 @@ std::vector PythonVersionStore::batch_write_index_keys_to_version folly::collect(batch_write_version_and_prune_if_needed(index_keys, stream_update_info_vector, prune_previous_versions)).get(); std::vector output(index_keys.size()); - for(auto key : folly::enumerate(index_keys)) - output[key.index] = *key; + for (size_t i = 0; i < index_keys.size(); i++) { + output[i] = index_keys[i]; + } std::vector> symbol_write_futs; for(const auto& item : output) { @@ -800,7 +801,8 @@ std::vector> PythonVersionStore::batch_read( auto read_versions_or_errors = batch_read_internal(stream_ids, version_queries, read_queries, read_options); std::vector> res; - for (auto&& [idx, read_version_or_error]: folly::enumerate(read_versions_or_errors)) { + for (size_t idx = 0; idx < read_versions_or_errors.size(); ++idx) { + auto& read_version_or_error = read_versions_or_errors[idx]; util::variant_match( read_version_or_error, [&res] (ReadVersionOutput& read_version) {
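// One subtlety the index loops in local_versioned_engine.cpp preserve: folly futures
// are move-only, so enumerating a vector of them by index still requires std::move on
// each element before chaining. A reduced sketch of that shape (assumes folly is
// available; Future::thenValue is the real folly API, the rest is illustrative):
#include <cstddef>
#include <vector>
#include <folly/futures/Future.h>

std::vector<folly::Future<int>> chain_all(std::vector<folly::Future<int>>&& futs) {
    std::vector<folly::Future<int>> out;
    out.reserve(futs.size());
    for (size_t idx = 0; idx < futs.size(); ++idx) {
        // std::move is mandatory: thenValue consumes the future it is called on
        out.push_back(std::move(futs[idx]).thenValue(
            [idx](int v) { return v + static_cast<int>(idx); }));
    }
    return out;
}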