@@ -1197,23 +1197,29 @@ template <typename VALUE_SIZE_FUNC>
 inline TableStatistics ConcurrentHashTable<CONFIG, F>::
   statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
 {
+  constexpr size_t batch_size = 128;
   NumberSeq summary;
   size_t literal_bytes = 0;
   InternalTable* table = get_table();
-  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
+  for (size_t batch_start = 0; batch_start < table->_size; batch_start += batch_size) {
+    // We batch the use of ScopedCS here as it has been found to be quite expensive to
+    // invoke it for every single bucket.
+    size_t batch_end = MIN2(batch_start + batch_size, table->_size);
     ScopedCS cs(thread, this);
-    size_t count = 0;
-    Bucket* bucket = table->get_bucket(bucket_it);
-    if (bucket->have_redirect() || bucket->is_locked()) {
-      continue;
-    }
-    Node* current_node = bucket->first();
-    while (current_node != NULL) {
-      ++count;
-      literal_bytes += vs_f(current_node->value());
-      current_node = current_node->next();
+    for (size_t bucket_it = batch_start; bucket_it < batch_end; bucket_it++) {
+      size_t count = 0;
+      Bucket* bucket = table->get_bucket(bucket_it);
+      if (bucket->have_redirect() || bucket->is_locked()) {
+        continue;
+      }
+      Node* current_node = bucket->first();
+      while (current_node != nullptr) {
+        ++count;
+        literal_bytes += vs_f(current_node->value());
+        current_node = current_node->next();
+      }
+      summary.add((double)count);
     }
-    summary.add((double)count);
   }
 
   return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
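For reviewers who want to see the shape of the change in isolation: below is a minimal, self-contained sketch of the batching pattern, with a hypothetical `ScopedGuard` standing in for HotSpot's `ScopedCS` (the names `buckets`, `batch_size`, and `ScopedGuard` are illustrative, not from the patch). The point is that the guard's enter/exit cost is paid roughly once per 128 buckets instead of once per bucket.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-in for ScopedCS (hypothetical; not the HotSpot class). The
// constructor models the expensive enter protocol, and a counter records
// how many times it runs.
struct ScopedGuard {
  ScopedGuard() { ++enters; }
  static size_t enters;
};
size_t ScopedGuard::enters = 0;

int main() {
  const std::vector<int> buckets(100000, 1);
  constexpr size_t batch_size = 128;
  size_t sum = 0;

  // One guard per batch of up to 128 buckets, instead of one per bucket:
  // the enter/exit cost is paid ~size/128 times rather than size times.
  for (size_t batch_start = 0; batch_start < buckets.size(); batch_start += batch_size) {
    const size_t batch_end = std::min(batch_start + batch_size, buckets.size());
    ScopedGuard cs;
    for (size_t i = batch_start; i < batch_end; i++) {
      sum += buckets[i];  // per-bucket work done under the guard
    }
  }

  std::printf("sum=%zu, guard entries=%zu (vs %zu if entered per bucket)\n",
              sum, ScopedGuard::enters, buckets.size());
  return 0;
}
```

Capping the batch at 128 looks like a deliberate trade-off: large enough to amortize the critical-section overhead, while presumably keeping each critical section short so that concurrent operations which must wait for readers to exit are not stalled for a scan of the whole table.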