
Revert " Introduce table improvement by Vladimir Makarov <vmakarov@re…

…dhat.com>."

This reverts commit 7577515.

Reversion performed quickly and roughly by Noah Gibbs, with some questionable resolutions of conflicts.
noahgibbs committed Jun 2, 2017
1 parent 321300d commit bcfaf94c515cc6ea8cb62f6d7ce460401c3758d1
Showing with 1,114 additions and 1,566 deletions.
  1. +1 −7 NEWS
  2. +6 −6 ext/-test-/st/foreach/foreach.c
  3. +34 −51 hash.c
  4. +28 −26 include/ruby/st.h
  5. +6 −2 insns.def
  6. +0 −4 internal.h
  7. +6 −1 numeric.c
  8. +1,028 −1,468 st.c
  9. +5 −1 vm.c
8 NEWS
@@ -142,10 +142,4 @@ with all sufficient information, see the ChangeLog file or Redmine
 * Print backtrace and error message in reverse order if STDERR is unchanged and a tty.
   [Feature #8661] [experimental]
-* configure option --with-ext now mandates its arguments. So for
-  instance if you run ./configure --with-ext=openssl,+ then the
-  openssl library is guaranteed compiled, otherwise the build fails
-  abnormally.
-  Note however to always add the ",+" at the end of the argument.
-  Otherwise nothing but openssl are built. [Feature #13302]
 * Thread deadlock detection now shows their backtrace and dependency. [Feature #8214]
12 ext/-test-/st/foreach/foreach.c
@@ -14,13 +14,13 @@ force_unpack_check(struct checker *c, st_data_t key, st_data_t val)
     if (c->nr == 0) {
         st_data_t i;
-        if (c->tbl->bins != NULL) rb_bug("should be packed\n");
+        if (!c->tbl->entries_packed) rb_bug("should be packed\n");
         /* force unpacking during iteration: */
         for (i = 1; i < expect_size; i++)
             st_add_direct(c->tbl, i, i);
-        if (c->tbl->bins == NULL) rb_bug("should be unpacked\n");
+        if (c->tbl->entries_packed) rb_bug("should be unpacked\n");
     }
     if (key != c->nr) {
@@ -84,7 +84,7 @@ unp_fec(VALUE self, VALUE test)
     st_add_direct(tbl, 0, 0);
-    if (tbl->bins != NULL) rb_bug("should still be packed\n");
+    if (!tbl->entries_packed) rb_bug("should still be packed\n");
     st_foreach_check(tbl, unp_fec_i, (st_data_t)&c, -1);
@@ -98,7 +98,7 @@ unp_fec(VALUE self, VALUE test)
                (VALUE)c.nr, (VALUE)expect_size);
     }
-    if (tbl->bins == NULL) rb_bug("should be unpacked\n");
+    if (tbl->entries_packed) rb_bug("should be unpacked\n");
     st_free_table(tbl);
@@ -145,7 +145,7 @@ unp_fe(VALUE self, VALUE test)
     st_add_direct(tbl, 0, 0);
-    if (tbl->bins != NULL) rb_bug("should still be packed\n");
+    if (!tbl->entries_packed) rb_bug("should still be packed\n");
     st_foreach(tbl, unp_fe_i, (st_data_t)&c);
@@ -159,7 +159,7 @@ unp_fe(VALUE self, VALUE test)
                (VALUE)c.nr, (VALUE)expect_size);
     }
-    if (tbl->bins == NULL) rb_bug("should be unpacked\n");
+    if (tbl->entries_packed) rb_bug("should be unpacked\n");
     st_free_table(tbl);
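
These checks exercise the old table's two representations: small tables keep entries inline ("packed", entries_packed set), and spill into chained bins once they grow ("unpacked"). For illustration, a minimal standalone sketch of that transition, assuming the pre-Makarov st.c API and the entries_packed field restored in st.h below; the 500-entry count is an arbitrary value well past the packed limit:

    #include "ruby/st.h"
    #include <assert.h>

    static void
    packed_transition_demo(void)
    {
        st_table *tbl = st_init_numtable();
        st_data_t i;

        st_add_direct(tbl, 0, 0);
        assert(tbl->entries_packed);   /* a one-entry table stays packed */

        for (i = 1; i < 500; i++)      /* grow well past the packed limit */
            st_add_direct(tbl, i, i);
        assert(!tbl->entries_packed);  /* the table has switched to bins */

        st_free_table(tbl);
    }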
85 hash.c
@@ -139,30 +139,14 @@ rb_hash(VALUE obj)
-long rb_objid_hash(st_index_t index);
-
-long
-rb_dbl_long_hash(double d)
-{
-    /* normalize -0.0 to 0.0 */
-    if (d == 0.0) d = 0.0;
-#if SIZEOF_INT == SIZEOF_VOIDP
-    return rb_memhash(&d, sizeof(d));
-#else
-    {
-        union {double d; uint64_t i;} u;
-        u.d = d;
-        return rb_objid_hash(rb_hash_start(u.i));
-    }
-#endif
-}
-
-static inline long
+static st_index_t
 any_hash(VALUE a, st_index_t (*other_func)(VALUE))
 {
     VALUE hval;
     st_index_t hnum;

     if (SPECIAL_CONST_P(a)) {
         if (a == Qundef) return 0;
         if (STATIC_SYM_P(a)) {
             hnum = a >> (RUBY_SPECIAL_SHIFT + ID_SCOPE_SHIFT);
             hnum = rb_hash_start(hnum);
@@ -186,7 +170,8 @@ any_hash(VALUE a, st_index_t (*other_func)(VALUE))
     }
     else if (BUILTIN_TYPE(a) == T_FLOAT) {
       flt:
-        hnum = rb_dbl_long_hash(rb_float_value(a));
+        hval = rb_dbl_hash(rb_float_value(a));
+        hnum = FIX2LONG(hval);
     }
     else {
         hnum = other_func(a);
@@ -209,42 +194,34 @@ rb_any_hash(VALUE a)
     return any_hash(a, obj_any_hash);
 }

-/* Here is a hash function for 64-bit key.  It is about 5 times faster
-   (2 times faster when uint128 type is absent) on Haswell than
-   tailored Spooky or City hash function can be.  */
-
-/* Here we two primes with random bit generation.  */
-static const uint64_t prime1 = ((uint64_t)0x2e0bb864 << 32) | 0xe9ea7df5;
-static const uint64_t prime2 = ((uint64_t)0xcdb32970 << 32) | 0x830fcaa1;
-
-static inline uint64_t
-mult_and_mix(uint64_t m1, uint64_t m2)
-{
-#if defined(__GNUC__) && UINT_MAX != ULONG_MAX
-    __uint128_t r = (__uint128_t) m1 * (__uint128_t) m2;
-    return (uint64_t) (r >> 64) ^ (uint64_t) r;
-#else
-    uint64_t hm1 = m1 >> 32, hm2 = m2 >> 32;
-    uint64_t lm1 = m1, lm2 = m2;
-    uint64_t v64_128 = hm1 * hm2;
-    uint64_t v32_96 = hm1 * lm2 + lm1 * hm2;
-    uint64_t v1_32 = lm1 * lm2;
-
-    return (v64_128 + (v32_96 >> 32)) ^ ((v32_96 << 32) + v1_32);
-#endif
-}
-
-static inline uint64_t
-key64_hash(uint64_t key, uint32_t seed)
-{
-    return mult_and_mix(key + seed, prime1);
-}
+static st_index_t
+rb_num_hash_start(st_index_t n)
+{
+    /*
+     * This hash function is lightly-tuned for Ruby.  Further tuning
+     * should be possible.  Notes:
+     *
+     * - (n >> 3) alone is great for heap objects and OK for fixnum,
+     *   however symbols perform poorly.
+     * - (n >> (RUBY_SPECIAL_SHIFT+3)) was added to make symbols hash well,
+     *   n.b.: +3 to remove most ID scope, +1 worked well initially, too
+     * - (n << 16) was finally added to avoid losing bits for fixnums
+     * - avoid expensive modulo instructions, it is currently only
+     *   shifts and bitmask operations.
+     */
+    return (n >> (RUBY_SPECIAL_SHIFT + 3) ^ (n << 16)) ^ (n >> 3);
+}

 long
 rb_objid_hash(st_index_t index)
 {
-    return (long)key64_hash(rb_hash_start(index), (uint32_t)prime2);
+    st_index_t hnum = rb_num_hash_start(index);
+
+    hnum = rb_hash_start(hnum);
+    hnum = rb_hash_uint(hnum, (st_index_t)rb_any_hash);
+    hnum = rb_hash_end(hnum);
+    return hnum;
 }

 static st_index_t
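
The two hash mixers swapped by this hunk differ sharply in cost: Makarov's key64_hash/mult_and_mix folds a (possibly approximated) 128-bit product of the key and a 64-bit prime down to 64 bits, while the restored rb_num_hash_start is only shifts and XORs, tuned per the comment above. A self-contained comparison sketch; the demo names and the RUBY_SPECIAL_SHIFT value of 8 are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t
    mult_and_mix_demo(uint64_t m1, uint64_t m2)
    {
        /* portable branch of the removed mixer: piece together the
           high and low halves of an (approximate) 128-bit product */
        uint64_t hm1 = m1 >> 32, hm2 = m2 >> 32;
        uint64_t lm1 = m1, lm2 = m2;
        uint64_t v64_128 = hm1 * hm2;
        uint64_t v32_96 = hm1 * lm2 + lm1 * hm2;
        uint64_t v1_32 = lm1 * lm2;
        return (v64_128 + (v32_96 >> 32)) ^ ((v32_96 << 32) + v1_32);
    }

    static uint64_t
    num_hash_start_demo(uint64_t n)
    {
        /* the restored mixer: three shifts and two XORs, no multiply,
           with RUBY_SPECIAL_SHIFT assumed to be 8 */
        return (n >> (8 + 3) ^ (n << 16)) ^ (n >> 3);
    }

    int
    main(void)
    {
        const uint64_t prime1 = ((uint64_t)0x2e0bb864 << 32) | 0xe9ea7df5;
        const uint64_t key = 0x00007f8a12345678;  /* e.g. an object address */

        printf("mult_and_mix:   %016llx\n",
               (unsigned long long)mult_and_mix_demo(key, prime1));
        printf("num_hash_start: %016llx\n",
               (unsigned long long)num_hash_start_demo(key));
        return 0;
    }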
@@ -281,7 +258,7 @@ rb_ident_hash(st_data_t n)
     }
 #endif
-    return (st_index_t)key64_hash(rb_hash_start((st_index_t)n), (uint32_t)prime2);
+    return (st_index_t)rb_num_hash_start((st_index_t)n);
 }

 static const struct st_hash_type identhash = {
@@ -650,6 +627,7 @@ static VALUE
 rb_hash_s_create(int argc, VALUE *argv, VALUE klass)
 {
     VALUE hash, tmp;
+    int i;

     if (argc == 1) {
         tmp = rb_hash_s_try_convert(Qnil, argv[0]);
@@ -703,7 +681,12 @@ rb_hash_s_create(int argc, VALUE *argv, VALUE klass)
     }
     hash = hash_alloc(klass);
-    rb_hash_bulk_insert(argc, argv, hash);
+    if (argc > 0) {
+        RHASH(hash)->ntbl = st_init_table_with_size(&objhash, argc / 2);
+    }
+    for (i=0; i<argc; i+=2) {
+        rb_hash_aset(hash, argv[i], argv[i + 1]);
+    }

     return hash;
 }
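
The restored path sizes the table from the argument count: Hash[1, "a", 2, "b"] reaches rb_hash_s_create with argc == 4, so argc / 2 == 2 key-value pairs, keys at even indexes and values at odd ones. A minimal sketch of the same loop shape using only public C-API calls; the function name is hypothetical:

    #include "ruby.h"

    static VALUE
    build_pairs_demo(void)
    {
        VALUE argv[] = { INT2FIX(1), rb_str_new_cstr("a"),
                         INT2FIX(2), rb_str_new_cstr("b") };
        int argc = 4, i;
        VALUE hash = rb_hash_new();

        /* same shape as the restored loop above: argv[i] is a key,
           argv[i + 1] its value */
        for (i = 0; i < argc; i += 2)
            rb_hash_aset(hash, argv[i], argv[i + 1]);
        return hash;  /* => {1=>"a", 2=>"b"} */
    }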
54 include/ruby/st.h
@@ -1,8 +1,6 @@
-/* This is a public domain general purpose hash table package
-   originally written by Peter Moore @ UCB.
-
-   The hash table data structures were redesigned and the package was
-   rewritten by Vladimir Makarov <vmakarov@redhat.com>.  */
+/* This is a public domain general purpose hash table package written by Peter Moore @ UCB. */

 /* @(#) st.h 5.1 89/12/14 */

 #ifndef RUBY_ST_H
 #define RUBY_ST_H 1
@@ -48,10 +46,6 @@ typedef unsigned LONG_LONG st_data_t;
 typedef struct st_table st_table;
 typedef st_data_t st_index_t;

-/* Maximal value of unsigned integer type st_index_t.  */
-#define MAX_ST_INDEX_VAL (~(st_index_t) 0)
-
 typedef int st_compare_func(st_data_t, st_data_t);
 typedef st_index_t st_hash_func(st_data_t);
@@ -72,26 +66,33 @@ struct st_hash_type {
 # define ST_DATA_COMPATIBLE_P(type) 0
 #endif

 typedef struct st_table_entry st_table_entry;

 struct st_table_entry; /* defined in st.c */

 struct st_table {
-    /* Cached features of the table -- see st.c for more details.  */
-    unsigned char entry_power, bin_power, size_ind;
-    /* How many times the table was rebuilt.  */
-    unsigned int rebuilds_num;
     const struct st_hash_type *type;
-    /* Number of entries currently in the table.  */
-    st_index_t num_entries;
-    /* Array of bins used for access by keys.  */
-    st_index_t *bins;
-    /* Start and bound index of entries in array entries.
-       entries_starts and entries_bound are in interval
-       [0,allocated_entries].  */
-    st_index_t entries_start, entries_bound;
-    /* Array of size 2^entry_power.  */
-    st_table_entry *entries;
+    st_index_t num_bins;
+    unsigned int entries_packed : 1;
+#ifdef __GNUC__
+    /*
+     * C spec says,
+     *   A bit-field shall have a type that is a qualified or unqualified
+     *   version of _Bool, signed int, unsigned int, or some other
+     *   implementation-defined type. It is implementation-defined whether
+     *   atomic types are permitted.
+     * In short, long and long long bit-field are implementation-defined
+     * feature. Therefore we want to suppress a warning explicitly.
+     */
+    __extension__
+#endif
+    st_index_t num_entries : ST_INDEX_BITS - 1;
+    union {
+        struct {
+            struct st_table_entry **bins;
+            void *private_list_head[2];
+        } big;
+        struct {
+            struct st_packed_entry *entries;
+            st_index_t real_entries;
+        } packed;
+    } as;
 };

 #define st_is_member(table,key) st_lookup((table),(key),(st_data_t *)0)
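
In the restored layout, the entries_packed bit discriminates the union: small tables store entries in the flat as.packed array, larger ones hang chained entries off as.big.bins. A minimal sketch of reading the struct; the predicate itself is hypothetical, but the fields are the ones declared above:

    #include "ruby/st.h"

    static st_index_t
    table_size_demo(const st_table *tbl)
    {
        if (tbl->entries_packed)
            return tbl->as.packed.real_entries;  /* packed arm */
        return tbl->num_entries;                 /* big arm: chained bins */
    }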
@@ -120,6 +121,7 @@ typedef int st_update_callback_func(st_data_t *key, st_data_t *value, st_data_t
 int st_update(st_table *table, st_data_t key, st_update_callback_func *func, st_data_t arg);
 int st_foreach(st_table *, int (*)(ANYARGS), st_data_t);
 int st_foreach_check(st_table *, int (*)(ANYARGS), st_data_t, st_data_t);
+int st_reverse_foreach(st_table *, int (*)(ANYARGS), st_data_t);
 st_index_t st_keys(st_table *table, st_data_t *keys, st_index_t size);
 st_index_t st_keys_check(st_table *table, st_data_t *keys, st_index_t size, st_data_t never);
 st_index_t st_values(st_table *table, st_data_t *values, st_index_t size);
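
The restored st_reverse_foreach walks entries in the opposite order of st_foreach but shares its callback contract: the callback receives each key, value, and the caller's argument, and returns ST_CONTINUE, ST_STOP, ST_DELETE, or ST_CHECK. A small usage sketch with a counting callback; the names are illustrative:

    #include "ruby/st.h"

    static int
    count_i(st_data_t key, st_data_t value, st_data_t arg)
    {
        int *n = (int *)arg;
        (*n)++;
        return ST_CONTINUE;  /* ST_STOP would end the walk early */
    }

    static int
    count_entries_demo(st_table *tbl)
    {
        int n = 0;
        st_foreach(tbl, count_i, (st_data_t)&n);
        return n;
    }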
8 insns.def
@@ -496,12 +496,16 @@ newhash
 (...)
 (VALUE val) // inc += 1 - num;
 {
+    rb_num_t i;
+
     RUBY_DTRACE_CREATE_HOOK(HASH, num);

     val = rb_hash_new();

     if (num) {
-        rb_hash_bulk_insert(num, STACK_ADDR_FROM_TOP(num), val);
+        for (i = num; i > 0; i -= 2) {
+            const VALUE v = TOPN(i - 2);
+            const VALUE k = TOPN(i - 1);
+            rb_hash_aset(val, k, v);
+        }
         POPN(num);
     }
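
The restored loop rebuilds the hash straight off the VM stack. For the literal {k1 => v1, k2 => v2}, the operands are pushed in source order, so v2 sits on top and TOPN counts down from it. A tiny simulation with a stand-in TOPN (all names hypothetical) shows the pairs come out in source order, matching what a single rb_hash_bulk_insert call produced:

    #include <stdio.h>

    static const char *stack[] = { "k1", "v1", "k2", "v2" };  /* v2 on top */
    #define NUM 4
    #define TOPN(n) stack[NUM - 1 - (n)]

    int
    main(void)
    {
        long i;
        for (i = NUM; i > 0; i -= 2)
            printf("rb_hash_aset(val, %s, %s)\n", TOPN(i - 1), TOPN(i - 2));
        /* prints k1,v1 then k2,v2: insertion order is preserved */
        return 0;
    }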
4 internal.h
@@ -1230,7 +1230,6 @@ VALUE rb_hash_has_key(VALUE hash, VALUE key);
 VALUE rb_hash_default_value(VALUE hash, VALUE key);
 VALUE rb_hash_set_default_proc(VALUE hash, VALUE proc);
 long rb_objid_hash(st_index_t index);
-long rb_dbl_long_hash(double d);
 st_table *rb_init_identtable(void);
 st_table *rb_init_identtable_with_size(st_index_t size);
 VALUE rb_hash_compare_by_id_p(VALUE hash);
@@ -1559,9 +1558,6 @@ extern int ruby_enable_coredump;
 int rb_get_next_signal(void);
 int rb_sigaltstack_size(void);

-/* st.c */
-extern void rb_hash_bulk_insert(long, const VALUE *, VALUE);
-
 /* strftime.c */
 #ifdef RUBY_ENCODING_H
 VALUE rb_strftime_timespec(const char *format, size_t format_len, rb_encoding *enc,
7 numeric.c
@@ -1418,7 +1418,12 @@ flo_hash(VALUE num)
 VALUE
 rb_dbl_hash(double d)
 {
-    return LONG2FIX(rb_dbl_long_hash(d));
+    st_index_t hash;
+
+    /* normalize -0.0 to 0.0 */
+    if (d == 0.0) d = 0.0;
+    hash = rb_memhash(&d, sizeof(d));
+    return ST2FIX(hash);
 }

 VALUE
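
The restored rb_dbl_hash hashes the raw bytes of the double, which is why the -0.0 fixup matters: IEEE 754 defines -0.0 == 0.0, yet the two values have different bit patterns, so without the normalization two equal Float keys could hash differently. A standalone check:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int
    main(void)
    {
        double pos = 0.0, neg = -0.0;
        uint64_t pbits, nbits;

        memcpy(&pbits, &pos, sizeof pos);
        memcpy(&nbits, &neg, sizeof neg);
        printf("equal: %d, same bits: %d\n", pos == neg, pbits == nbits);
        /* prints "equal: 1, same bits: 0"; after `if (d == 0.0) d = 0.0;`
           both zeros hash the same bytes */
        return 0;
    }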
(The diffs for st.c and vm.c are not rendered on this page.)