Speed up shape transitions
This commit significantly speeds up shape transitions by changing
get_next_shape_internal to no longer perform a lookup itself and
instead require the caller to perform it. This avoids double lookups
during shape transitions.

There is a significant (~2x) speedup in the following micro-benchmark:

    puts(Benchmark.measure do
      o = Object.new

      100_000.times do |i|
        o.instance_variable_set(:"@A#{i}", 0)
      end
    end)

Before (user / system / total / real time, in seconds):

    22.393194   0.201639  22.594833 ( 22.684237)

After:

    11.323086   0.022284  11.345370 ( 11.389346)
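
The ~2x follows from where the time goes: every instance_variable_set must
first find out whether the object's shape already knows the ivar, and before
this commit that search of the shape parent chain ran twice per set, once
inside get_next_shape_internal (via rb_shape_lookup_id, deleted below) and
once in the caller via rb_shape_get_iv_index. Each walk is O(chain depth), so
eliminating one of the two roughly halves the benchmark. Here is a minimal,
standalone C sketch of such a parent-chain walk; shape_t, chain_lookup, and
the field layout are simplified stand-ins for illustration, not CRuby's
actual rb_shape_t:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified model of a shape chain: each shape records the ivar
     * name ("edge") whose addition created it and a link to its parent. */
    typedef struct shape {
        struct shape *parent;
        const char   *edge_name;
        size_t        iv_index;
    } shape_t;

    /* One O(depth) walk up the parent chain, the kind of search that
     * used to happen twice per ivar set. */
    static shape_t *
    chain_lookup(shape_t *shape, const char *name)
    {
        while (shape) {
            if (shape->edge_name && strcmp(shape->edge_name, name) == 0) {
                return shape;
            }
            shape = shape->parent;
        }
        return NULL;
    }

    int
    main(void)
    {
        /* Build a chain of 100,000 shapes, mirroring the benchmark's
         * 100,000 distinct ivars. */
        enum { N = 100000 };
        shape_t *shapes = calloc(N, sizeof(shape_t));
        char (*names)[16] = malloc(N * sizeof(*names));

        for (size_t i = 0; i < N; i++) {
            snprintf(names[i], sizeof(names[i]), "@A%zu", i);
            shapes[i].parent = i ? &shapes[i - 1] : NULL;
            shapes[i].edge_name = names[i];
            shapes[i].iv_index = i;
        }

        /* Worst case: finding the oldest ivar from the newest shape
         * traverses all 100,000 links. */
        shape_t *hit = chain_lookup(&shapes[N - 1], "@A0");
        printf("found %s at index %zu\n", hit->edge_name, hit->iv_index);

        free(names);
        free(shapes);
        return 0;
    }

Since the chain deepens by one on each set, per-set cost grows linearly and
total benchmark cost quadratically; cutting two walks per set down to one
matches the roughly 2x the timings show.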
peterzhu2118 committed Nov 17, 2022
1 parent 189e3c0 commit e3ea1f9
Showing 2 changed files with 47 additions and 71 deletions.
shape.c: 32 additions & 61 deletions
@@ -97,73 +97,48 @@ rb_shape_get_shape(VALUE obj)
     return rb_shape_get_shape_by_id(rb_shape_get_shape_id(obj));
 }
 
-static rb_shape_t *
-rb_shape_lookup_id(rb_shape_t* shape, ID id, enum shape_type shape_type)
-{
-    while (shape->parent_id != INVALID_SHAPE_ID) {
-        if (shape->edge_name == id) {
-            // If the shape type is different, we don't
-            // want this to count as a "found" ID
-            if (shape_type == (enum shape_type)shape->type) {
-                return shape;
-            }
-            else {
-                return NULL;
-            }
-        }
-        shape = rb_shape_get_parent(shape);
-    }
-    return NULL;
-}
-
 static rb_shape_t*
 get_next_shape_internal(rb_shape_t * shape, ID id, enum shape_type shape_type)
 {
     rb_shape_t *res = NULL;
     RB_VM_LOCK_ENTER();
     {
-        if (rb_shape_lookup_id(shape, id, shape_type)) {
-            // If shape already contains the ivar that is being set, we'll return shape
-            res = shape;
+        if (!shape->edges) {
+            shape->edges = rb_id_table_create(0);
         }
-        else {
-            if (!shape->edges) {
-                shape->edges = rb_id_table_create(0);
-            }
 
-            // Lookup the shape in edges - if there's already an edge and a corresponding shape for it,
-            // we can return that. Otherwise, we'll need to get a new shape
-            if (!rb_id_table_lookup(shape->edges, id, (VALUE *)&res)) {
-                // In this case, the shape exists, but the shape is garbage, so we need to recreate it
-                if (res) {
-                    rb_id_table_delete(shape->edges, id);
-                    res->parent_id = INVALID_SHAPE_ID;
-                }
+        // Lookup the shape in edges - if there's already an edge and a corresponding shape for it,
+        // we can return that. Otherwise, we'll need to get a new shape
+        if (!rb_id_table_lookup(shape->edges, id, (VALUE *)&res)) {
+            // In this case, the shape exists, but the shape is garbage, so we need to recreate it
+            if (res) {
+                rb_id_table_delete(shape->edges, id);
+                res->parent_id = INVALID_SHAPE_ID;
+            }
 
-                rb_shape_t * new_shape = rb_shape_alloc(id, shape);
+            rb_shape_t * new_shape = rb_shape_alloc(id, shape);
 
-                new_shape->type = (uint8_t)shape_type;
-                new_shape->capacity = shape->capacity;
+            new_shape->type = (uint8_t)shape_type;
+            new_shape->capacity = shape->capacity;
 
-                switch (shape_type) {
-                  case SHAPE_IVAR:
-                    new_shape->next_iv_index = shape->next_iv_index + 1;
-                    break;
-                  case SHAPE_CAPACITY_CHANGE:
-                  case SHAPE_IVAR_UNDEF:
-                  case SHAPE_FROZEN:
-                    new_shape->next_iv_index = shape->next_iv_index;
-                    break;
-                  case SHAPE_INITIAL_CAPACITY:
-                  case SHAPE_ROOT:
-                    rb_bug("Unreachable");
-                    break;
-                }
+            switch (shape_type) {
+              case SHAPE_IVAR:
+                new_shape->next_iv_index = shape->next_iv_index + 1;
+                break;
+              case SHAPE_CAPACITY_CHANGE:
+              case SHAPE_IVAR_UNDEF:
+              case SHAPE_FROZEN:
+                new_shape->next_iv_index = shape->next_iv_index;
+                break;
+              case SHAPE_INITIAL_CAPACITY:
+              case SHAPE_ROOT:
+                rb_bug("Unreachable");
+                break;
+            }
 
-                rb_id_table_insert(shape->edges, id, (VALUE)new_shape);
+            rb_id_table_insert(shape->edges, id, (VALUE)new_shape);
 
-                res = new_shape;
-            }
+            res = new_shape;
         }
     }
     RB_VM_LOCK_LEAVE();

@@ -181,10 +156,6 @@ rb_shape_transition_shape_remove_ivar(VALUE obj, ID id, rb_shape_t *shape)
 {
     rb_shape_t * next_shape = get_next_shape_internal(shape, id, SHAPE_IVAR_UNDEF);
 
-    if (shape == next_shape) {
-        return;
-    }
-
     rb_shape_set_shape(obj, next_shape);
 }

variable.c: 15 additions & 10 deletions
@@ -1276,28 +1276,33 @@ static void
 generic_ivar_set(VALUE obj, ID id, VALUE val)
 {
     struct ivar_update ivup;
 
-    // The returned shape will have `id` in its iv_table
-    rb_shape_t * shape = rb_shape_get_next(rb_shape_get_shape(obj), obj, id);
+    attr_index_t index;
+    rb_shape_t *shape = rb_shape_get_shape(obj);
+    bool found = rb_shape_get_iv_index(shape, id, &index);
+    if (!found) {
+        index = shape->next_iv_index;
+        shape = rb_shape_get_next(shape, obj, id);
+        RUBY_ASSERT(index == (shape->next_iv_index - 1));
+    }
+
     ivup.shape = shape;
 
     RB_VM_LOCK_ENTER();
     {
-        attr_index_t ent_data;
-        if (rb_shape_get_iv_index(shape, id, &ent_data)) {
-            ivup.iv_index = (uint32_t) ent_data;
-        }
-        else {
-            rb_bug("unreachable. Shape was not found for id: %s", rb_id2name(id));
-        }
+        ivup.iv_index = (uint32_t)index;
 
         st_update(generic_ivtbl(obj, id, false), (st_data_t)obj, generic_ivar_update, (st_data_t)&ivup);
     }
     RB_VM_LOCK_LEAVE();
 
     ivup.ivtbl->ivptr[ivup.iv_index] = val;
 
-    rb_shape_set_shape(obj, shape);
     RB_OBJ_WRITTEN(obj, Qundef, val);
+
+    if (!found) {
+        rb_shape_set_shape(obj, shape);
+    }
 }
 
 static VALUE *
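The variable.c hunk above is the caller-side half of the new contract:
perform the single rb_shape_get_iv_index walk up front, and call
rb_shape_get_next only when the ivar is genuinely new. A standalone C sketch
of that found/not-found pattern; shape_t, shape_get_iv_index, and
shape_get_next are simplified hypothetical stand-ins for the CRuby functions,
kept only close enough to show the control flow:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct shape {
        struct shape *parent;
        const char   *edge_name;
        size_t        next_iv_index;
    } shape_t;

    /* One O(depth) walk, like rb_shape_get_iv_index: reports whether the
     * ivar already has a slot and, if so, which index it occupies. */
    static bool
    shape_get_iv_index(shape_t *shape, const char *name, size_t *index)
    {
        while (shape && shape->parent) {
            if (strcmp(shape->edge_name, name) == 0) {
                *index = shape->next_iv_index - 1;
                return true;
            }
            shape = shape->parent;
        }
        return false;
    }

    /* Transition with no chain search, like the new get_next_shape_internal:
     * the caller has already established that the name is absent. */
    static shape_t *
    shape_get_next(shape_t *shape, const char *name)
    {
        static shape_t pool[8];
        static size_t  used;
        shape_t *next = &pool[used++];
        next->parent = shape;
        next->edge_name = name;
        next->next_iv_index = shape->next_iv_index + 1;
        return next;
    }

    int
    main(void)
    {
        shape_t root = {0};
        shape_t *shape = &root;
        size_t index;

        const char *ivars[] = { "@a", "@b", "@a" }; /* "@a" is set twice */
        for (size_t i = 0; i < 3; i++) {
            if (!shape_get_iv_index(shape, ivars[i], &index)) { /* single walk */
                index = shape->next_iv_index;
                shape = shape_get_next(shape, ivars[i]); /* miss: transition */
            }
            printf("%s -> index %zu\n", ivars[i], index);
        }
        return 0;
    }

The second "@a" set finds index 0 on the first walk and never transitions,
which is the case the deleted rb_shape_lookup_id used to detect a second
time from inside get_next_shape_internal.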
