Treat MVM_CALLSITE_ARG_(FLAT | NAMED) like FLAT_NAMED
If both the FLAT and NAMED flags are set, this means exactly the same as
the FLAT_NAMED flag, so we treat them the same. Later on we can free up
the FLAT_NAMED flag. We do this in two steps to avoid bootstrapping issues.
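
To illustrate the equivalence described above, here is a minimal sketch (not part of this commit; the helper name is invented for illustration) of how code can recognize a flattening-named argument under either encoding, using only the flag constants that appear in the diffs below:

    /* Hypothetical helper, for illustration only: under the convention this
     * commit adopts, an argument denotes named (hash) flattening if it carries
     * the legacy FLAT_NAMED flag, or the combination FLAT | NAMED. */
    static int is_flattening_named(MVMCallsiteFlags flag) {
        return (flag & MVM_CALLSITE_ARG_FLAT_NAMED)
            || ((flag & MVM_CALLSITE_ARG_FLAT) && (flag & MVM_CALLSITE_ARG_NAMED));
    }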
niner committed Jan 7, 2022
1 parent 1b1b7d5 commit 9c2df85
Showing 3 changed files with 148 additions and 63 deletions.
146 changes: 106 additions & 40 deletions src/core/args.c
@@ -81,21 +81,37 @@ MVMCallStackFlattening * MVM_args_perform_flattening(MVMThreadContext *tc, MVMCa
for (i = 0; i < cs->flag_count; i++) {
MVMCallsiteFlags flag = cs->arg_flags[i];
if (flag & MVM_CALLSITE_ARG_FLAT) {
/* Positional flattening. */
MVMObject *list = source[map[i]].o;
MVMuint32 repr = REPR(list)->ID;
if ((repr != MVM_REPR_ID_VMArray && repr != MVM_REPR_ID_MultiDimArray)
|| !IS_CONCRETE(list))
MVM_exception_throw_adhoc(tc,
"Argument flattening array must be a concrete VMArray or MultiDimArray");
MVMint64 elems = REPR(list)->elems(tc, STABLE(list), list, OBJECT_BODY(list));
if (elems > MVM_ARGS_LIMIT)
MVM_exception_throw_adhoc(tc,
"Flattened array has %"PRId64" elements, but argument lists are limited to %"PRId32"",
elems, MVM_ARGS_LIMIT);
flatten_counts[i] = (MVMuint16)elems;
num_pos += (MVMuint16)elems;
num_args += (MVMuint16)elems;
if (flag & MVM_CALLSITE_ARG_NAMED) {
/* Named flattening. */
MVMObject *hash = source[map[i]].o;
if (REPR(hash)->ID != MVM_REPR_ID_MVMHash || !IS_CONCRETE(hash))
MVM_exception_throw_adhoc(tc,
"Argument flattening hash must be a concrete VMHash");
MVMint64 elems = REPR(hash)->elems(tc, STABLE(hash), hash, OBJECT_BODY(hash));
if (elems > MVM_ARGS_LIMIT)
MVM_exception_throw_adhoc(tc,
"Flattened hash has %"PRId64" elements, but argument lists are limited to %"PRId32"",
elems, MVM_ARGS_LIMIT);
flatten_counts[i] = (MVMuint16)elems;
num_args += (MVMuint16)elems;
}
else {
/* Positional flattening. */
MVMObject *list = source[map[i]].o;
MVMuint32 repr = REPR(list)->ID;
if ((repr != MVM_REPR_ID_VMArray && repr != MVM_REPR_ID_MultiDimArray)
|| !IS_CONCRETE(list))
MVM_exception_throw_adhoc(tc,
"Argument flattening array must be a concrete VMArray or MultiDimArray");
MVMint64 elems = REPR(list)->elems(tc, STABLE(list), list, OBJECT_BODY(list));
if (elems > MVM_ARGS_LIMIT)
MVM_exception_throw_adhoc(tc,
"Flattened array has %"PRId64" elements, but argument lists are limited to %"PRId32"",
elems, MVM_ARGS_LIMIT);
flatten_counts[i] = (MVMuint16)elems;
num_pos += (MVMuint16)elems;
num_args += (MVMuint16)elems;
}
}
else if (flag & MVM_CALLSITE_ARG_FLAT_NAMED) {
/* Named flattening. */
@@ -136,32 +152,82 @@ MVMCallStackFlattening * MVM_args_perform_flattening(MVMThreadContext *tc, MVMCa
for (i = 0; i < cs->flag_count; i++) {
MVMCallsiteFlags flag = cs->arg_flags[i];
if (flag & MVM_CALLSITE_ARG_FLAT) {
/* Positional flattening. */
if (flatten_counts[i] == 0)
continue;
MVMuint16 j;
MVMObject *list = source[map[i]].o;
MVMStorageSpec lss = REPR(list)->pos_funcs.get_elem_storage_spec(tc, STABLE(list));
for (j = 0; j < flatten_counts[i]; j++) {
switch (lss.inlineable ? lss.boxed_primitive : 0) {
case MVM_STORAGE_SPEC_BP_INT:
record->produced_cs.arg_flags[cur_new_arg] = MVM_CALLSITE_ARG_INT;
record->arg_info.source[cur_new_arg].i64 = MVM_repr_at_pos_i(tc, list, j);
break;
case MVM_STORAGE_SPEC_BP_NUM:
record->produced_cs.arg_flags[cur_new_arg] = MVM_CALLSITE_ARG_NUM;
record->arg_info.source[cur_new_arg].n64 = MVM_repr_at_pos_n(tc, list, j);
break;
case MVM_STORAGE_SPEC_BP_STR:
record->produced_cs.arg_flags[cur_new_arg] = MVM_CALLSITE_ARG_STR;
record->arg_info.source[cur_new_arg].s = MVM_repr_at_pos_s(tc, list, j);
break;
default:
record->produced_cs.arg_flags[cur_new_arg] = MVM_CALLSITE_ARG_OBJ;
record->arg_info.source[cur_new_arg].o = MVM_repr_at_pos_o(tc, list, j);
break;
if (flag & MVM_CALLSITE_ARG_NAMED) {
/* Named flattening. Hash randomization means that iterating the
* hash can produce many orders of the same keys, which will ruin
* our hit rate on the callsite intern cache (and in turn cause
* fake megamorphic blowups at callsites). Thus we sort the keys;
* by hash code shall suffice, since collisions are unlikely. */
MVMuint32 limit = flatten_counts[i];
if (limit == 0)
continue;
ArgNameAndValue *anv = alloca(limit * sizeof(ArgNameAndValue));
MVMObject *hash = source[map[i]].o;
MVMHashBody *body = &((MVMHash *)hash)->body;
MVMStrHashTable *hashtable = &(body->hashtable);
MVMStrHashIterator iterator = MVM_str_hash_first(tc, hashtable);
MVMuint32 seen = 0;
while (seen < limit && /* Defend against hash changes */
!MVM_str_hash_at_end(tc, hashtable, iterator)) {
MVMHashEntry *current = MVM_str_hash_current_nocheck(tc,
hashtable, iterator);
anv[seen].name = current->hash_handle.key;
anv[seen].value = current->value;
seen++;
iterator = MVM_str_hash_next(tc, hashtable, iterator);
}
qsort(anv, seen, sizeof(ArgNameAndValue), key_sort_by_hash);
MVMuint32 j;
for (j = 0; j < seen; j++) {
MVMString *arg_name = anv[j].name;
MVMint32 already_index = callsite_name_index(tc, record, cur_new_name,
arg_name);
if (already_index < 0) {
/* Didn't see this name yet, so add to callsite and args. */
record->produced_cs.arg_flags[cur_new_arg] =
MVM_CALLSITE_ARG_NAMED | MVM_CALLSITE_ARG_OBJ;
record->arg_info.source[cur_new_arg].o = anv[j].value;
cur_new_arg++;
record->produced_cs.arg_names[cur_new_name] = arg_name;
cur_new_name++;
}
else {
/* New value for an existing name; replace the value and
* ensure correct type flag. */
record->produced_cs.arg_flags[already_index] =
MVM_CALLSITE_ARG_NAMED | MVM_CALLSITE_ARG_OBJ;
record->arg_info.source[already_index].o = anv[j].value;
}
}
}
else {
/* Positional flattening. */
if (flatten_counts[i] == 0)
continue;
MVMuint16 j;
MVMObject *list = source[map[i]].o;
MVMStorageSpec lss = REPR(list)->pos_funcs.get_elem_storage_spec(tc, STABLE(list));
for (j = 0; j < flatten_counts[i]; j++) {
switch (lss.inlineable ? lss.boxed_primitive : 0) {
case MVM_STORAGE_SPEC_BP_INT:
record->produced_cs.arg_flags[cur_new_arg] = MVM_CALLSITE_ARG_INT;
record->arg_info.source[cur_new_arg].i64 = MVM_repr_at_pos_i(tc, list, j);
break;
case MVM_STORAGE_SPEC_BP_NUM:
record->produced_cs.arg_flags[cur_new_arg] = MVM_CALLSITE_ARG_NUM;
record->arg_info.source[cur_new_arg].n64 = MVM_repr_at_pos_n(tc, list, j);
break;
case MVM_STORAGE_SPEC_BP_STR:
record->produced_cs.arg_flags[cur_new_arg] = MVM_CALLSITE_ARG_STR;
record->arg_info.source[cur_new_arg].s = MVM_repr_at_pos_s(tc, list, j);
break;
default:
record->produced_cs.arg_flags[cur_new_arg] = MVM_CALLSITE_ARG_OBJ;
record->arg_info.source[cur_new_arg].o = MVM_repr_at_pos_o(tc, list, j);
break;
}
cur_new_arg++;
}
cur_new_arg++;
}
}
else if (flag & MVM_CALLSITE_ARG_FLAT_NAMED) {
63 changes: 41 additions & 22 deletions src/core/bytecode.c
@@ -864,34 +864,53 @@ static MVMCallsite ** deserialize_callsites(MVMThreadContext *tc, MVMCompUnit *c
* before all nameds (flattening named counts as named). */
for (j = 0; j < elems; j++) {
if (callsites[i]->arg_flags[j] & MVM_CALLSITE_ARG_FLAT) {
if (!(callsites[i]->arg_flags[j] & MVM_CALLSITE_ARG_OBJ)) {
MVMuint32 k;
for (k = 0; k <= i; k++) {
if (!callsites[i]->is_interned) {
MVM_free(callsites[i]->arg_flags);
MVM_free_null(callsites[i]);
if (callsites[i]->arg_flags[j] & MVM_CALLSITE_ARG_NAMED) {
if (!(callsites[i]->arg_flags[j] & MVM_CALLSITE_ARG_OBJ)) {
MVMuint32 k;
for (k = 0; k <= i; k++) {
if (!callsites[i]->is_interned) {
MVM_free(callsites[i]->arg_flags);
MVM_free_null(callsites[i]);
}
}
MVM_fixed_size_free(tc, tc->instance->fsa,
sizeof(MVMCallsite *) * rs->expected_callsites,
callsites);
MVM_exception_throw_adhoc(tc, "Flattened named args must be objects");
}
MVM_fixed_size_free(tc, tc->instance->fsa,
sizeof(MVMCallsite *) * rs->expected_callsites,
callsites);
MVM_exception_throw_adhoc(tc, "Flattened positional args must be objects");
has_flattening = 1;
nameds_slots++;
}
if (nameds_slots) {
MVMuint32 k;
for (k = 0; k <= i; k++) {
if (!callsites[i]->is_interned) {
MVM_free(callsites[i]->arg_flags);
MVM_free_null(callsites[i]);
else {
if (!(callsites[i]->arg_flags[j] & MVM_CALLSITE_ARG_OBJ)) {
MVMuint32 k;
for (k = 0; k <= i; k++) {
if (!callsites[i]->is_interned) {
MVM_free(callsites[i]->arg_flags);
MVM_free_null(callsites[i]);
}
}
MVM_fixed_size_free(tc, tc->instance->fsa,
sizeof(MVMCallsite *) * rs->expected_callsites,
callsites);
MVM_exception_throw_adhoc(tc, "Flattened positional args must be objects");
}
MVM_fixed_size_free(tc, tc->instance->fsa,
sizeof(MVMCallsite *) * rs->expected_callsites,
callsites);
MVM_exception_throw_adhoc(tc, "Flattened positional args must appear before named args");
if (nameds_slots) {
MVMuint32 k;
for (k = 0; k <= i; k++) {
if (!callsites[i]->is_interned) {
MVM_free(callsites[i]->arg_flags);
MVM_free_null(callsites[i]);
}
}
MVM_fixed_size_free(tc, tc->instance->fsa,
sizeof(MVMCallsite *) * rs->expected_callsites,
callsites);
MVM_exception_throw_adhoc(tc, "Flattened positional args must appear before named args");
}
has_flattening = 1;
positionals++;
}
has_flattening = 1;
positionals++;
}
else if (callsites[i]->arg_flags[j] & MVM_CALLSITE_ARG_FLAT_NAMED) {
if (!(callsites[i]->arg_flags[j] & MVM_CALLSITE_ARG_OBJ)) {
2 changes: 1 addition & 1 deletion src/core/callsite.h
@@ -132,7 +132,7 @@ MVM_STATIC_INLINE MVMuint16 MVM_callsite_num_nameds(MVMThreadContext *tc, const
MVMuint16 i = cs->num_pos;
MVMuint16 nameds = 0;
while (i < cs->flag_count) {
if (!(cs->arg_flags[i] & MVM_CALLSITE_ARG_FLAT_NAMED))
if (!(cs->arg_flags[i] & (MVM_CALLSITE_ARG_FLAT_NAMED | MVM_CALLSITE_ARG_FLAT)))
nameds++;
i++;
}
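
For context, a sketch of the complete helper as it presumably reads after this one-line change (the truncated signature and the trailing return fall outside the hunk above, so they are reconstructed by inference rather than copied from the commit):

    MVM_STATIC_INLINE MVMuint16 MVM_callsite_num_nameds(MVMThreadContext *tc,
            const MVMCallsite *cs) {
        /* Named args sit after the positionals in the flag list. */
        MVMuint16 i = cs->num_pos;
        MVMuint16 nameds = 0;
        while (i < cs->flag_count) {
            /* Skip flattening args: the legacy FLAT_NAMED bit, or plain FLAT
             * (which, combined with NAMED, now means the same thing). */
            if (!(cs->arg_flags[i] & (MVM_CALLSITE_ARG_FLAT_NAMED | MVM_CALLSITE_ARG_FLAT)))
                nameds++;
            i++;
        }
        return nameds;
    }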