tests for postponed index creation
zorancv committed May 8, 2024
1 parent 0c8d8c3 commit 979c440
Showing 3 changed files with 106 additions and 2 deletions.
3 changes: 2 additions & 1 deletion graph/src/env/mod.rs
@@ -259,7 +259,8 @@ impl EnvVars {
subgraph_error_retry_ceil: Duration::from_secs(inner.subgraph_error_retry_ceil_in_secs),
subgraph_error_retry_jitter: inner.subgraph_error_retry_jitter,
enable_select_by_specific_attributes: inner.enable_select_by_specific_attributes.0,
-postpone_attribute_index_creation: inner.postpone_attribute_index_creation.0,
+postpone_attribute_index_creation: inner.postpone_attribute_index_creation.0
+    || cfg!(debug_assertions),
log_trigger_data: inner.log_trigger_data.0,
explorer_ttl: Duration::from_secs(inner.explorer_ttl_in_secs),
explorer_lock_threshold: Duration::from_millis(inner.explorer_lock_threshold_in_msec),
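A side note on the change above: `cfg!(debug_assertions)` is evaluated at compile time and is `true` in debug builds (and hence under a default `cargo test`), so postponed attribute index creation is forced on in test builds regardless of the environment setting. A minimal standalone sketch of the same pattern, assuming an illustrative env var name that is not taken from this codebase:

fn main() {
    // The env-derived flag is OR-ed with a compile-time check, so debug
    // builds always exercise the postponed-index code path.
    // NOTE: the env var name below is illustrative, not the real one.
    let from_env = std::env::var("GRAPH_POSTPONE_ATTRIBUTE_INDEX_CREATION")
        .map(|v| v == "true")
        .unwrap_or(false);
    let postpone = from_env || cfg!(debug_assertions);
    println!("postpone_attribute_index_creation = {postpone}");
}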
101 changes: 101 additions & 0 deletions store/postgres/src/relational/ddl_tests.rs
@@ -152,12 +152,42 @@ fn test_manual_index_creation_ddl() {
);
}

#[test]
fn generate_postponed_indexes() {
    let layout = test_layout(THING_GQL);
    let table = layout.table(&SqlName::from("Scalar")).unwrap();
    let query_vec = table.create_postponed_indexes();
    assert_eq!(query_vec.len(), 7);
    let queries = query_vec.join(" ");
    check_eqv(THING_POSTPONED_INDEXES, &queries)
}

const THING_POSTPONED_INDEXES: &str = r#"
create index concurrently if not exists attr_1_1_scalar_bool
on "sgd0815"."scalar" using btree("bool");
create index concurrently if not exists attr_1_2_scalar_int
on "sgd0815"."scalar" using btree("int");
create index concurrently if not exists attr_1_3_scalar_big_decimal
on "sgd0815"."scalar" using btree("big_decimal");
create index concurrently if not exists attr_1_4_scalar_string
on "sgd0815"."scalar" using btree(left("string", 256));
create index concurrently if not exists attr_1_5_scalar_bytes
on "sgd0815"."scalar" using btree(substring("bytes", 1, 64));
create index concurrently if not exists attr_1_6_scalar_big_int
on "sgd0815"."scalar" using btree("big_int");
create index concurrently if not exists attr_1_7_scalar_color
on "sgd0815"."scalar" using btree("color");
"#;

#[test]
fn generate_ddl() {
    let layout = test_layout(THING_GQL);
    let sql = layout.as_ddl(false).expect("Failed to generate DDL");
    assert_eq!(THING_DDL, &sql); // Use `assert_eq!` to also test the formatting.

    let layout = test_layout(THING_GQL);
    let sql = layout.as_ddl(true).expect("Failed to generate DDL");
    check_eqv(THING_DDL_ON_COPY, &sql);

    let layout = test_layout(MUSIC_GQL);
    let sql = layout.as_ddl(false).expect("Failed to generate DDL");
    check_eqv(MUSIC_DDL, &sql);
@@ -398,6 +428,77 @@ create index attr_2_0_file_thing_id
"#;

const THING_DDL_ON_COPY: &str = r#"create type sgd0815."color"
as enum ('BLUE', 'red', 'yellow');
create type sgd0815."size"
as enum ('large', 'medium', 'small');
create table "sgd0815"."thing" (
    vid bigserial primary key,
    block_range int4range not null,
    "id" text not null,
    "big_thing" text not null
);
alter table "sgd0815"."thing"
add constraint thing_id_block_range_excl exclude using gist (id with =, block_range with &&);
create index brin_thing
on "sgd0815"."thing"
using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops);
create index thing_block_range_closed
on "sgd0815"."thing"(coalesce(upper(block_range), 2147483647))
where coalesce(upper(block_range), 2147483647) < 2147483647;
create index attr_0_0_thing_id
on "sgd0815"."thing" using btree("id");
create index attr_0_1_thing_big_thing
on "sgd0815"."thing" using gist("big_thing", block_range);
create table "sgd0815"."scalar" (
    vid bigserial primary key,
    block_range int4range not null,
    "id" text not null,
    "bool" boolean,
    "int" int4,
    "big_decimal" numeric,
    "string" text,
    "bytes" bytea,
    "big_int" numeric,
    "color" "sgd0815"."color"
);
alter table "sgd0815"."scalar"
add constraint scalar_id_block_range_excl exclude using gist (id with =, block_range with &&);
create index brin_scalar
on "sgd0815"."scalar"
using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops);
create index scalar_block_range_closed
on "sgd0815"."scalar"(coalesce(upper(block_range), 2147483647))
where coalesce(upper(block_range), 2147483647) < 2147483647;
create index attr_1_0_scalar_id
on "sgd0815"."scalar" using btree("id");
create table "sgd0815"."file_thing" (
    vid bigserial primary key,
    block_range int4range not null,
    causality_region int not null,
    "id" text not null
);
alter table "sgd0815"."file_thing"
add constraint file_thing_id_block_range_excl exclude using gist (id with =, block_range with &&);
create index brin_file_thing
on "sgd0815"."file_thing"
using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops);
create index file_thing_block_range_closed
on "sgd0815"."file_thing"(coalesce(upper(block_range), 2147483647))
where coalesce(upper(block_range), 2147483647) < 2147483647;
create index attr_2_0_file_thing_id
on "sgd0815"."file_thing" using btree("id");
"#;

const BOOKS_GQL: &str = r#"type Author @entity {
    id: ID!
    name: String!
4 changes: 3 additions & 1 deletion store/postgres/src/relational/prune.rs
@@ -94,7 +94,9 @@ impl TablePair {
if catalog::table_exists(conn, dst_nsp.as_str(), &dst.name)? {
writeln!(query, "truncate table {};", dst.qualified_name)?;
} else {
-dst.as_ddl(schema, catalog, false, &mut query)?; // TODO: is it a copy?
+// When pruning we do not postpone index creation, since the
+// assumption is that not much data gets inserted.
+dst.as_ddl(schema, catalog, false, &mut query)?;
}
conn.batch_execute(&query)?;
