@@ -1222,23 +1222,30 @@ template <typename VALUE_SIZE_FUNC>
 inline TableStatistics ConcurrentHashTable<CONFIG, F>::
   statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
 {
+  constexpr size_t batch_size = 128;
   NumberSeq summary;
   size_t literal_bytes = 0;
   InternalTable* table = get_table();
-  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
+  size_t num_batches = table->_size / batch_size;
+  for (size_t batch_start = 0; batch_start < _table->_size; batch_start += batch_size) {
+    // We batch the use of ScopedCS here as it has been found to be quite expensive to
+    // invoke it for every single bucket.
+    size_t batch_end = MIN2(batch_start + batch_size, _table->_size);
     ScopedCS cs(thread, this);
-    size_t count = 0;
-    Bucket* bucket = table->get_bucket(bucket_it);
-    if (bucket->have_redirect() || bucket->is_locked()) {
-      continue;
-    }
-    Node* current_node = bucket->first();
-    while (current_node != nullptr) {
-      ++count;
-      literal_bytes += vs_f(current_node->value());
-      current_node = current_node->next();
+    for (size_t bucket_it = batch_start; bucket_it < batch_end; bucket_it++) {
+      size_t count = 0;
+      Bucket* bucket = table->get_bucket(bucket_it);
+      if (bucket->have_redirect() || bucket->is_locked()) {
+        continue;
+      }
+      Node* current_node = bucket->first();
+      while (current_node != nullptr) {
+        ++count;
+        literal_bytes += vs_f(current_node->value());
+        current_node = current_node->next();
+      }
+      summary.add((double)count);
     }
-    summary.add((double)count);
   }

   if (_stats_rate == nullptr) {