diff --git a/include/boost/intrusive/hashtable.hpp b/include/boost/intrusive/hashtable.hpp
index ab51d1dc..3e1293bd 100644
--- a/include/boost/intrusive/hashtable.hpp
+++ b/include/boost/intrusive/hashtable.hpp
@@ -568,7 +568,6 @@ struct optimize_multikey_is_true
    static const bool value = sizeof(test(0)) > sizeof(detail::yes_type)*2u;
 };
 
-template
 struct insert_commit_data_impl
 {
    std::size_t hash;
@@ -580,17 +579,6 @@ struct insert_commit_data_impl
    {  hash = h; }
 };
 
-template<>
-struct insert_commit_data_impl
-{
-   std::size_t bucket_idx;
-   inline std::size_t get_hash() const
-   {  return 0U;  }
-
-   inline void set_hash(std::size_t)
-   {}
-};
-
 template
 inline typename pointer_traits::template rebind_pointer::type
    dcast_bucket_ptr(const SlistNodePtr &p)
@@ -2367,7 +2355,7 @@ class hashtable_impl
    /// @endcond
 
    public:
-   typedef insert_commit_data_impl insert_commit_data;
+   typedef insert_commit_data_impl insert_commit_data;
 
    private:
    void default_init_actions()
@@ -2729,7 +2717,7 @@ class hashtable_impl
       insert_commit_data commit_data;
       std::pair<iterator, bool> ret = this->insert_unique_check(key_of_value()(value), commit_data);
       if(ret.second){
-         ret.first = this->insert_unique_commit(value, commit_data);
+         ret.first = this->insert_unique_fast_commit(value, commit_data);
       }
       return ret;
    }
@@ -2856,7 +2844,43 @@ class hashtable_impl
    //!   erased between the "insert_check" and "insert_commit" calls.
    //!
    //!   After a successful rehashing insert_commit_data remains valid.
-   iterator insert_unique_commit(reference value, const insert_commit_data &commit_data) BOOST_NOEXCEPT
+   iterator insert_unique_commit(reference value, const insert_commit_data& commit_data) BOOST_NOEXCEPT
+   {
+      size_type bucket_num = this->priv_hash_to_nbucket(commit_data.get_hash());
+      bucket_type& b = this->priv_bucket(bucket_num);
+      this->priv_size_traits().increment();
+      node_ptr const n = pointer_traits<node_ptr>::pointer_to(this->priv_value_to_node(value));
+      BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(!safemode_or_autounlink || slist_node_algorithms::unique(n));
+      node_functions_t::store_hash(n, commit_data.get_hash(), store_hash_t());
+      this->priv_insertion_update_cache(bucket_num);
+      group_functions_t::insert_in_group(n, n, optimize_multikey_t());
+      slist_node_algorithms::link_after(b.get_node_ptr(), n);
+      return this->build_iterator(siterator(n), this->to_ptr(b));
+   }
+
+   //! <b>Requires</b>: value must be an lvalue of type value_type. commit_data
+   //!   must have been obtained from a previous call to "insert_check".
+   //!   No objects should have been inserted or erased from the unordered_set between
+   //!   the "insert_check" that filled "commit_data" and the call to "insert_fast_commit".
+   //!
+   //!   No rehashing shall be performed between "insert_check" and "insert_fast_commit".
+   //!
+   //! <b>Effects</b>: Inserts the value in the unordered_set using the information obtained
+   //!   from the "commit_data" that a previous "insert_check" filled.
+   //!
+   //! <b>Returns</b>: An iterator to the newly inserted object.
+   //!
+   //! <b>Complexity</b>: Constant time.
+   //!
+   //! <b>Throws</b>: Nothing.
+   //!
+   //! <b>Notes</b>: This function only makes sense if an "insert_check" has been
+   //!   previously executed to fill "commit_data". No value should be inserted or
+   //!   erased between the "insert_check" and "insert_fast_commit" calls.
+   //!
+   //!   Since this commit operation does not support rehashing between the check
+   //!   and the commit, it is faster than "insert_commit".
+   iterator insert_unique_fast_commit(reference value, const insert_commit_data &commit_data) BOOST_NOEXCEPT
    {
       this->priv_size_inc();
       node_ptr const n = this->priv_value_to_node_ptr(value);
diff --git a/include/boost/intrusive/unordered_set.hpp b/include/boost/intrusive/unordered_set.hpp
index 67762838..c53ec527 100644
--- a/include/boost/intrusive/unordered_set.hpp
+++ b/include/boost/intrusive/unordered_set.hpp
@@ -222,6 +222,10 @@ class unordered_set_impl
    inline iterator insert_commit(reference value, const insert_commit_data &commit_data) BOOST_NOEXCEPT
    {  return table_type::insert_unique_commit(value, commit_data);  }
 
+   //! @copydoc ::boost::intrusive::hashtable::insert_unique_fast_commit
+   inline iterator insert_fast_commit(reference value, const insert_commit_data &commit_data) BOOST_NOEXCEPT
+   {  return table_type::insert_unique_fast_commit(value, commit_data);  }
+
 #ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
 
    //! @copydoc ::boost::intrusive::hashtable::erase(const_iterator)
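
Not part of the patch: a minimal caller-side sketch of the two-phase API added in the hunks above. The item type, its integer key, the hash function and the bucket count are made up for illustration; insert_check, insert_fast_commit, insert_commit_data, bucket_type and bucket_traits are the Boost.Intrusive names touched by this diff. insert_check hashes the key once and caches the result in insert_commit_data; insert_fast_commit reuses it, which is only valid because nothing is inserted, erased or rehashed in between.

#include <boost/intrusive/unordered_set.hpp>
#include <boost/container_hash/hash.hpp>
#include <cassert>
#include <utility>

//Hypothetical intrusive value type: an int key hooked into an unordered_set
struct item : boost::intrusive::unordered_set_base_hook<>
{
   int key_;
   explicit item(int k) : key_(k) {}

   friend bool operator==(const item &a, const item &b)
   {  return a.key_ == b.key_;  }

   friend std::size_t hash_value(const item &v)
   {  return boost::hash<int>()(v.key_);  }
};

typedef boost::intrusive::unordered_set<item> item_set;

int main()
{
   item_set::bucket_type buckets[32];
   item_set set(item_set::bucket_traits(buckets, 32));

   item candidate(7);
   item_set::insert_commit_data commit_data;

   //Phase 1: check with the key only; the computed hash is cached in commit_data
   std::pair<item_set::iterator, bool> r = set.insert_check(candidate, commit_data);
   if(r.second){
      //Phase 2: nothing was inserted, erased or rehashed since insert_check,
      //so the cheaper insert_fast_commit can be used instead of insert_commit
      set.insert_fast_commit(candidate, commit_data);
   }
   assert(set.count(candidate) == 1u);

   set.clear();   //unlink the element before it goes out of scope
   return 0;
}
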
diff --git a/test/unordered_test.hpp b/test/unordered_test.hpp
index 33a6679c..12574c0b 100644
--- a/test/unordered_test.hpp
+++ b/test/unordered_test.hpp
@@ -158,20 +158,48 @@ void test_unordered::test_insert(value_cont_type& values, deta
       <>::type unordered_set_type;
    typedef typename unordered_set_type::bucket_traits bucket_traits;
    typedef typename unordered_set_type::key_of_value key_of_value;
+   typedef typename unordered_set_type::bucket_ptr bucket_ptr;
    const std::size_t ExtraBuckets = unordered_set_type::bucket_overhead;
    typename unordered_set_type::bucket_type buckets[BucketSize + ExtraBuckets];
 
-   unordered_set_type testset(bucket_traits(
-      pointer_traits<typename unordered_set_type::bucket_ptr>::
-         pointer_to(buckets[0]), sizeof(buckets)/sizeof(*buckets)));
+   const bucket_traits orig_bucket_traits( pointer_traits<bucket_ptr>::pointer_to(buckets[0])
+                                         , sizeof(buckets) / sizeof(*buckets));
+   unordered_set_type testset(orig_bucket_traits);
 
    testset.insert(&values[0] + 2, &values[0] + 5);
 
    typename unordered_set_type::insert_commit_data commit_data;
    BOOST_TEST ((!testset.insert_check(key_of_value()(values[2]), commit_data).second));
    BOOST_TEST (( testset.insert_check(key_of_value()(values[0]), commit_data).second));
+   //Test insert_fast_commit
+   {
+      BOOST_TEST(testset.find(key_of_value()(values[0])) == testset.end());
+      testset.insert_fast_commit(values[0], commit_data);
+      BOOST_TEST(testset.find(key_of_value()(values[0])) != testset.end());
+      testset.erase(key_of_value()(values[0]));
+      BOOST_TEST(testset.find(key_of_value()(values[0])) == testset.end());
+   }
+
+   //Test insert_commit
+   BOOST_IF_CONSTEXPR(!unordered_set_type::incremental)
+   {
+      BOOST_TEST((testset.insert_check(key_of_value()(values[0]), commit_data).second));
+      typename unordered_set_type::bucket_type buckets2[2U + ExtraBuckets];
+      //Rehash to a smaller bucket array between insert_check and insert_commit
+      testset.rehash(bucket_traits(
+         pointer_traits<bucket_ptr>::pointer_to(buckets2[0]), 2U + ExtraBuckets));
+      testset.insert_commit(values[0], commit_data);
+      BOOST_TEST(testset.find(key_of_value()(values[0])) != testset.end());
+      testset.erase(key_of_value()(values[0]));
+      BOOST_TEST(testset.find(key_of_value()(values[0])) == testset.end());
+      //Restore the original bucket array and the initial contents
+      testset.clear();
+      testset.rehash(orig_bucket_traits);
+      testset.insert(&values[0] + 2, &values[0] + 5);
+   }
+
    const unordered_set_type& const_testset = testset;
-   if(unordered_set_type::incremental)
+   BOOST_IF_CONSTEXPR(unordered_set_type::incremental)
    {
       {  int init_values [] = { 4, 5, 1 };
          TEST_INTRUSIVE_SEQUENCE_MAYBEUNIQUE( init_values, const_testset );  }
@@ -600,7 +628,7 @@ void test_unordered::test_rehash(value_cont_type& values, deta
    typedef typename unordered_type::bucket_ptr bucket_ptr;
 
    typename unordered_type::bucket_type buckets1[BucketSize + ExtraBuckets];
-   typename unordered_type::bucket_type buckets2 [2 + ExtraBuckets];
+   typename unordered_type::bucket_type buckets2 [BucketSize / 4 + ExtraBuckets];
    typename unordered_type::bucket_type buckets3[BucketSize*2 + ExtraBuckets];
 
    unordered_type testset1(&values[0], &values[0] + 6, bucket_traits(
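
The test changes above also exercise the slower, rehash-tolerant path. A sketch of that scenario, reusing the hypothetical item and item_set types from the previous sketch (the function name and parameters here are likewise made up), might look as follows; rehash, insert_check and insert_commit are the documented Boost.Intrusive calls the updated test_insert relies on.

//Illustrative only: commit after an intervening rehash, as the new test block does
void check_rehash_then_commit(item_set &set, item &candidate,
                              item_set::bucket_type *new_buckets, std::size_t new_count)
{
   item_set::insert_commit_data commit_data;
   if(set.insert_check(candidate, commit_data).second){
      //A rehash between the check and the commit is allowed for insert_commit,
      //which recomputes the bucket from the hash cached in commit_data...
      set.rehash(item_set::bucket_traits(new_buckets, new_count));
      set.insert_commit(candidate, commit_data);
      //...whereas insert_fast_commit is documented not to support a rehash
      //between insert_check and the commit call.
   }
}
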