clear deduplication cache when SQS FifoQueue is purged (#8218)
martin-walsh committed May 10, 2023
1 parent 2977f13 commit 815c399
Showing 2 changed files with 31 additions and 0 deletions.
1 change: 1 addition & 0 deletions localstack/services/sqs/models.py
@@ -983,6 +983,7 @@ def clear(self):
self.message_groups.clear()
self.inflight_groups.clear()
self.message_group_queue.queue.clear()
self.deduplication.clear()


class SqsStore(BaseStore):
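For context, a minimal sketch of the affected clear() method after this change. Only the four cleared attribute names come from the diff above; the surrounding class, constructor, and attribute types are illustrative assumptions, not LocalStack's real model code.

import queue

class FifoQueueSketch:
    """Illustrative stand-in for LocalStack's FIFO queue model; not the real class."""

    def __init__(self):
        self.message_groups = {}                   # group id -> pending messages
        self.inflight_groups = set()               # groups with an in-flight message
        self.message_group_queue = queue.Queue()   # FIFO ordering of group ids
        self.deduplication = {}                    # deduplication id -> accepted message

    def clear(self):
        # Invoked on PurgeQueue: drop all queued state, including the deduplication
        # cache, so a message re-sent with the same MessageDeduplicationId after a
        # purge is accepted instead of being silently dropped.
        self.message_groups.clear()
        self.inflight_groups.clear()
        self.message_group_queue.queue.clear()
        self.deduplication.clear()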
30 changes: 30 additions & 0 deletions tests/integration/test_sqs.py
@@ -2924,6 +2924,36 @@ def test_purge_queue_deletes_delayed_messages(self, sqs_create_queue, aws_client
receive_result = aws_client.sqs.receive_message(QueueUrl=queue_url, WaitTimeSeconds=1)
assert "Messages" not in receive_result.keys()

@pytest.mark.aws_validated
def test_purge_queue_clears_fifo_deduplication_cache(self, sqs_create_queue, aws_client):
fifo_queue_name = f"test-queue-{short_uid()}.fifo"
queue_url = sqs_create_queue(QueueName=fifo_queue_name, Attributes={"FifoQueue": "true"})
dedup_id = f"fifo_dedup-{short_uid()}"
group_id = f"fifo_group-{short_uid()}"

aws_client.sqs.send_message(
QueueUrl=queue_url,
MessageBody="message-1",
MessageGroupId=group_id,
MessageDeduplicationId=dedup_id,
)

aws_client.sqs.purge_queue(QueueUrl=queue_url)

aws_client.sqs.send_message(
QueueUrl=queue_url,
MessageBody="message-2",
MessageGroupId=group_id,
MessageDeduplicationId=dedup_id,
)

receive_result = aws_client.sqs.receive_message(QueueUrl=queue_url, WaitTimeSeconds=1)

assert len(receive_result["Messages"]) == 1
message = receive_result["Messages"][0]

assert message["Body"] == "message-2"

@pytest.mark.aws_validated
@pytest.mark.skip_snapshot_verify(paths=["$..Error.Detail"])
def test_successive_purge_calls_fail(self, sqs_create_queue, monkeypatch, snapshot, aws_client):
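The new test can also be reproduced outside pytest with a plain boto3 client. The endpoint URL, region, and dummy credentials below are placeholders for a locally running LocalStack instance and are not part of the commit.

import boto3

# Point the client at a local SQS endpoint (endpoint and credentials are assumptions).
sqs = boto3.client(
    "sqs",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

queue_url = sqs.create_queue(
    QueueName="purge-dedup-demo.fifo", Attributes={"FifoQueue": "true"}
)["QueueUrl"]

# The first send populates the FIFO deduplication cache for this deduplication id.
sqs.send_message(
    QueueUrl=queue_url,
    MessageBody="message-1",
    MessageGroupId="group-1",
    MessageDeduplicationId="dedup-1",
)

# Purging the queue should also reset the deduplication cache ...
sqs.purge_queue(QueueUrl=queue_url)

# ... so a re-send with the same deduplication id is accepted rather than dropped.
sqs.send_message(
    QueueUrl=queue_url,
    MessageBody="message-2",
    MessageGroupId="group-1",
    MessageDeduplicationId="dedup-1",
)

received = sqs.receive_message(QueueUrl=queue_url, WaitTimeSeconds=1)
assert [m["Body"] for m in received.get("Messages", [])] == ["message-2"]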
