Commit
Merge pull request #427 from Altinity/fix_unjustified_xfails
Fix unjustified xfails
Selfeer committed Dec 26, 2023
2 parents 94f2435 + fe7eb9a commit 9e31390
Showing 3 changed files with 16 additions and 12 deletions.
18 changes: 9 additions & 9 deletions sink-connector-lightweight/tests/integration/tests/insert.py
@@ -11,7 +11,7 @@ def simple_insert(
):
"""Check that simple insert to MySQL is properly propagated to the replicated ClickHouse table."""

- table_name = f"`insert_{getuid()}`"
+ table_name = f"tb_{getuid()}"
mysql = self.context.cluster.node("mysql-master")

with Given(
@@ -142,7 +142,7 @@ def use_select_from_table_as_value(
@Name("one partition one part")
def one_partition_one_part(self, node=None):
"""Check `INSERT` that creates one partition and one part."""
- name = f"{getuid()}"
+ name = f"tb_{getuid()}"

for clickhouse_table_engine in self.context.clickhouse_table_engines:
with Given("I create MySQL to ClickHouse replicated tables"):
@@ -182,7 +182,7 @@ def one_partition_one_part(self, node=None):
def one_partition_many_parts(self, node=None):
"""Check that `INSERT` that creates one partition with many parts to MySQL is properly propagated to the replicated
ClickHouse table."""
- name = f"{getuid()}"
+ name = f"tb_{getuid()}"

for clickhouse_table_engine in self.context.clickhouse_table_engines:
with Given("I create MySQL to ClickHouse replicated tables"):
@@ -223,7 +223,7 @@ def one_partition_many_parts(self, node=None):
def one_partition_mixed_parts(self, node=None):
"""Check that `INSERT` that creates one partition with one large part and many small parts to MySQL is properly
propagated to the replicated ClickHouse table."""
- name = f"{getuid()}"
+ name = f"tb_{getuid()}"

for clickhouse_table_engine in self.context.clickhouse_table_engines:
with Given("I create MySQL to ClickHouse replicated tables"):
@@ -277,7 +277,7 @@ def one_partition_mixed_parts(self, node=None):
def many_partitions_one_part(self, node=None):
"""Check that `INSERT` of many partitions and one part to MySQL is properly propagated to the replicated ClickHouse
table."""
- name = f"{getuid()}"
+ name = f"tb_{getuid()}"

for clickhouse_table_engine in self.context.clickhouse_table_engines:
with Given("I create MySQL to ClickHouse replicated tables"):
@@ -323,7 +323,7 @@ def many_partitions_one_part(self, node=None):
def many_partitions_many_parts(self, node=None):
"""Check that `INSERT` of many partitions and many parts to MySQL is properly propagated to the replicated ClickHouse
table."""
- name = f"{getuid()}"
+ name = f"tb_{getuid()}"

for clickhouse_table_engine in self.context.clickhouse_table_engines:
with Given("I create MySQL to ClickHouse replicated tables"):
@@ -364,7 +364,7 @@ def many_partitions_many_parts(self, node=None):
def many_partitions_mixed_parts(self, node=None):
"""Check that `INSERT` with of many partitions, each with one large part and many small parts to MySQL is properly
propagated to the replicated ClickHouse table."""
- name = f"{getuid()}"
+ name = f"tb_{getuid()}"

for clickhouse_table_engine in self.context.clickhouse_table_engines:
with Given("I create MySQL to ClickHouse replicated tables"):
@@ -418,7 +418,7 @@ def many_partitions_mixed_parts(self, node=None):
def one_million_datapoints(self, node=None):
xfail("too big insert")
"""Check that `INSERT` of one million entries to MySQL is properly propagated to the replicated ClickHouse table."""
- name = f"{getuid()}"
+ name = f"tb_{getuid()}"

for clickhouse_table_engine in self.context.clickhouse_table_engines:
with Given("I create MySQL to ClickHouse replicated tables"):
@@ -461,7 +461,7 @@ def one_million_datapoints(self, node=None):
@Name("parallel")
def parallel(self):
"""Check that after different `INSERT` queries in parallel MySQL and Clickhouse has the same data."""
- name = f"{getuid()}"
+ name = f"tb_{getuid()}"

for clickhouse_table_engine in self.context.clickhouse_table_engines:
with Given("I create MySQL to ClickHouse replicated tables"):
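
The rename is not just cosmetic: getuid()-style helpers commonly derive names from UUIDs, which can begin with a digit, and ClickHouse rejects unquoted identifiers that do not start with a letter or underscore. Prefixing every generated name with tb_ (and dropping the old backtick quoting) keeps the name valid on both sides of the replication pipeline. A minimal sketch of the failure mode, assuming a UUID-based getuid(); the repo's real helper may differ:

    # Hypothetical illustration of why the tests prefix generated names
    # with "tb_". Assumes getuid() yields a UUID-derived string, as is
    # common in TestFlows suites.
    import uuid

    def getuid():
        return str(uuid.uuid1()).replace("-", "_")

    uid = getuid()
    # A uid such as "3f2c..." starts with a digit; ClickHouse rejects
    # unquoted identifiers that do not begin with a letter or underscore.
    unsafe = f"{uid}"    # may start with a digit -> needs quoting
    safe = f"tb_{uid}"   # always starts with a letter -> valid unquoted
    print(unsafe, safe)
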
2 changes: 1 addition & 1 deletion sink-connector/tests/integration/helpers/cluster.py
@@ -136,7 +136,7 @@ def cmd(
self,
cmd,
message=None,
- exitcode=None,
+ exitcode=0,
steps=True,
shell_command="bash --noediting",
no_checks=False,
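
Changing the default from exitcode=None to exitcode=0 flips the helper from "ignore the exit status unless a caller asks" to "assert success unless a caller opts out". A simplified stand-in, assuming the helper checks the exit code whenever exitcode is not None; the real cluster.py method wraps a TestFlows shell and has extra parameters (steps, shell_command), not subprocess:

    import subprocess

    def cmd(command, message=None, exitcode=0, no_checks=False):
        """Simplified stand-in for Cluster.cmd(). With exitcode=0 as the
        default, every command is now checked for success; previously
        (exitcode=None) failures passed silently unless a caller opted in."""
        r = subprocess.run(command, shell=True, capture_output=True, text=True)
        if not no_checks and exitcode is not None:
            assert r.returncode == exitcode, (
                f"'{command}' exited with {r.returncode}, expected {exitcode}"
            )
        return r

    cmd("true")                   # passes: exit code 0
    cmd("false", exitcode=1)      # passes: caller expects failure
    cmd("false", no_checks=True)  # passes: checks explicitly disabled
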
@@ -124,8 +124,12 @@ def init_sink_connector(
"I start sink connector",
description="""Sending sink settings push command on bash_tools""",
):
- node.cmd(f"{sink_settings_transfer_command_confluent}")
-
+ command = node.cmd(f"{sink_settings_transfer_command_confluent}")
+ assert command.output.strip().startswith(
+     '{"name":"sink-connector"'
+ ) or command.output.strip().startswith(
+     '{"error_code":409,"message":"Connector sink-connector already exists"}'
+ ), error()
yield
finally:
with Finally("I delete sink and debezium connections"):
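
This new assertion hardens the setup step: pushing the connector config counts as successful only if Kafka Connect's REST API answers with either the created connector or a 409 saying it already exists; any other response (a 500, an empty reply from a dead service) now fails fast in the Given step instead of surfacing later as a vague downstream failure. The accepted prefixes, extracted into a standalone check for illustration (connector_push_ok is a hypothetical name, not part of the repo):

    def connector_push_ok(output: str) -> bool:
        """Mirror of the new check: accept the two benign Kafka Connect
        responses and reject everything else."""
        out = output.strip()
        return out.startswith('{"name":"sink-connector"') or out.startswith(
            '{"error_code":409,"message":"Connector sink-connector already exists"}'
        )

    assert connector_push_ok('{"name":"sink-connector","config":{}}')
    assert connector_push_ok(
        '{"error_code":409,"message":"Connector sink-connector already exists"}'
    )
    assert not connector_push_ok('{"error_code":500,"message":"boom"}')
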
