Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 3 additions & 14 deletions .github/workflows/e2e.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ env:
DZ_BASE_IMAGE: ghcr.io/malbeclabs/dz-e2e/base:${{ github.sha }}
DZ_LEDGER_IMAGE: ghcr.io/malbeclabs/dz-e2e/ledger:${{ github.sha }}
DZ_CONTROLLER_IMAGE: ghcr.io/malbeclabs/dz-e2e/controller:${{ github.sha }}
DZ_ACTIVATOR_IMAGE: ghcr.io/malbeclabs/dz-e2e/activator:${{ github.sha }}
DZ_MANAGER_IMAGE: ghcr.io/malbeclabs/dz-e2e/manager:${{ github.sha }}
DZ_FUNDER_IMAGE: ghcr.io/malbeclabs/dz-e2e/funder:${{ github.sha }}
DZ_DEVICE_IMAGE: ghcr.io/malbeclabs/dz-e2e/device:${{ github.sha }}
Expand Down Expand Up @@ -57,7 +56,6 @@ jobs:
docker push ${{ env.DZ_IMAGE_REPO }}/base:${{ env.DZ_IMAGE_TAG }}
docker push ${{ env.DZ_IMAGE_REPO }}/ledger:${{ env.DZ_IMAGE_TAG }}
docker push ${{ env.DZ_IMAGE_REPO }}/controller:${{ env.DZ_IMAGE_TAG }}
docker push ${{ env.DZ_IMAGE_REPO }}/activator:${{ env.DZ_IMAGE_TAG }}
docker push ${{ env.DZ_IMAGE_REPO }}/manager:${{ env.DZ_IMAGE_TAG }}
docker push ${{ env.DZ_IMAGE_REPO }}/funder:${{ env.DZ_IMAGE_TAG }}
docker push ${{ env.DZ_IMAGE_REPO }}/device:${{ env.DZ_IMAGE_TAG }}
Expand All @@ -84,9 +82,6 @@ jobs:
# many parallel subtests internally and would starve other tests.
dedicated="TestE2E_BackwardCompatibility"

# Shard 2: Tests that also run with onchain allocation enabled.
onchain_tests="TestE2E_IBRL|TestE2E_IBRL_WithAllocatedIP|TestE2E_Multicast|TestE2E_InterfaceValidation|TestE2E_UserLimits|TestE2E_MulticastPublisher_BothAllocationPaths"

remaining=$(echo "$tests" | grep -v "^${dedicated}$")

# Distribute remaining tests round-robin across shards
Expand All @@ -106,13 +101,10 @@ jobs:
i=$((i + 1))
done <<< "$remaining"

# Build JSON matrix: shard 1 = dedicated (legacy), shard 2 = dedicated (onchain),
# shard 3 = onchain (no activator), shards 4+ = round-robin (legacy)
matrix="[{\"shard\":1,\"run\":\"^(${dedicated})$\",\"onchain_allocation\":\"\",\"disable_activator\":\"\"}"
matrix="${matrix},{\"shard\":2,\"run\":\"^(${dedicated})$\",\"onchain_allocation\":\"true\",\"disable_activator\":\"\"}"
matrix="${matrix},{\"shard\":3,\"run\":\"^(${onchain_tests})$\",\"onchain_allocation\":\"true\",\"disable_activator\":\"true\"}"
# Build JSON matrix: shard 1 = BackwardCompatibility, shards 2+ = round-robin
matrix="[{\"shard\":1,\"run\":\"^(${dedicated})$\"}"
for ((i=0; i<SHARD_COUNT; i++)); do
matrix="${matrix},{\"shard\":$((i + 4)),\"run\":\"^(${shards[$i]})$\",\"onchain_allocation\":\"\",\"disable_activator\":\"\"}"
matrix="${matrix},{\"shard\":$((i + 2)),\"run\":\"^(${shards[$i]})$\"}"
done
matrix="${matrix}]"

Expand Down Expand Up @@ -172,7 +164,6 @@ jobs:
pull_with_retry ${{ env.DZ_IMAGE_REPO }}/base:${{ env.DZ_IMAGE_TAG }}
pull_with_retry ${{ env.DZ_IMAGE_REPO }}/ledger:${{ env.DZ_IMAGE_TAG }}
pull_with_retry ${{ env.DZ_IMAGE_REPO }}/controller:${{ env.DZ_IMAGE_TAG }}
pull_with_retry ${{ env.DZ_IMAGE_REPO }}/activator:${{ env.DZ_IMAGE_TAG }}
pull_with_retry ${{ env.DZ_IMAGE_REPO }}/manager:${{ env.DZ_IMAGE_TAG }}
pull_with_retry ${{ env.DZ_IMAGE_REPO }}/funder:${{ env.DZ_IMAGE_TAG }}
pull_with_retry ${{ env.DZ_IMAGE_REPO }}/device:${{ env.DZ_IMAGE_TAG }}
Expand All @@ -186,6 +177,4 @@ jobs:
- name: test
env:
DZ_E2E_NO_BUILD: "1"
DZ_E2E_ONCHAIN_ALLOCATION: ${{ matrix.onchain_allocation }}
DZ_E2E_DISABLE_ACTIVATOR: ${{ matrix.disable_activator }}
run: go test -tags=e2e -timeout=20m -parallel=12 -run '${{ matrix.run }}' -v
1 change: 0 additions & 1 deletion e2e/.env.local
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ DZ_BASE_IMAGE=${DZ_IMAGE_REPO}/base:${DZ_IMAGE_TAG}

DZ_LEDGER_IMAGE=${DZ_IMAGE_REPO}/ledger:${DZ_IMAGE_TAG}
DZ_CONTROLLER_IMAGE=${DZ_IMAGE_REPO}/controller:${DZ_IMAGE_TAG}
DZ_ACTIVATOR_IMAGE=${DZ_IMAGE_REPO}/activator:${DZ_IMAGE_TAG}
DZ_MANAGER_IMAGE=${DZ_IMAGE_REPO}/manager:${DZ_IMAGE_TAG}
DZ_FUNDER_IMAGE=${DZ_IMAGE_REPO}/funder:${DZ_IMAGE_TAG}
DZ_DEVICE_IMAGE=${DZ_IMAGE_REPO}/device:${DZ_IMAGE_TAG}
Expand Down
177 changes: 0 additions & 177 deletions e2e/activator_interface_delete_test.go

This file was deleted.

21 changes: 3 additions & 18 deletions e2e/allocation_lifecycle_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,9 +47,6 @@ func TestE2E_User_AllocationLifecycle(t *testing.T) {
Manager: devnet.ManagerSpec{
ServiceabilityProgramKeypairPath: serviceabilityProgramKeypairPath,
},
Activator: devnet.ActivatorSpec{
OnchainAllocation: devnet.BoolPtr(true),
},
}, log, dockerClient, subnetAllocator)
require.NoError(t, err)

Expand Down Expand Up @@ -276,9 +273,6 @@ func TestE2E_MulticastGroup_AllocationLifecycle(t *testing.T) {
Manager: devnet.ManagerSpec{
ServiceabilityProgramKeypairPath: serviceabilityProgramKeypairPath,
},
Activator: devnet.ActivatorSpec{
OnchainAllocation: devnet.BoolPtr(true),
},
}, log, dockerClient, subnetAllocator)
require.NoError(t, err)

Expand Down Expand Up @@ -306,7 +300,7 @@ func TestE2E_MulticastGroup_AllocationLifecycle(t *testing.T) {

// Create multicast group
// Note: We don't use -w (wait for activation) here because there's a race condition
// between the activator's initial fetch and the multicast group creation. The activator
// between the program's initial fetch and the multicast group creation. The program
// polls every 60 seconds, which matches the CLI's -w timeout, causing failures.
// Instead, we let require.Eventually below handle the wait for activation.
log.Debug("==> Creating multicast group")
Expand All @@ -317,7 +311,7 @@ func TestE2E_MulticastGroup_AllocationLifecycle(t *testing.T) {
require.NoError(t, err)

// Wait for multicast group to be activated
// Note: Activator polls every 60 seconds, so we need a timeout > 60s to be safe
// Note: Reactivation polling runs every 60 seconds, so we need a timeout > 60s to be safe
log.Debug("==> Waiting for multicast group activation")
var activatedMC *serviceability.MulticastGroup
require.Eventually(t, func() bool {
Expand Down Expand Up @@ -419,9 +413,6 @@ func TestE2E_MultipleLinks_AllocationLifecycle(t *testing.T) {
Manager: devnet.ManagerSpec{
ServiceabilityProgramKeypairPath: serviceabilityProgramKeypairPath,
},
Activator: devnet.ActivatorSpec{
OnchainAllocation: devnet.BoolPtr(true),
},
}, log, dockerClient, subnetAllocator)
require.NoError(t, err)

Expand Down Expand Up @@ -595,7 +586,7 @@ func TestE2E_MultipleLinks_AllocationLifecycle(t *testing.T) {
// Bug scenario:
// 1. User with Multicast type is activated as publisher → allocates tunnel_net, tunnel_id, dz_ip
// 2. User disconnects and reconnects with two pub groups → sets status to Updating
// 3. Activator re-activates user → BUG: would allocate NEW resources instead of keeping existing
// 3. Program re-activates user → BUG: would allocate NEW resources instead of keeping existing
//
// The fix preserves existing tunnel_net/tunnel_id/dz_ip allocations.
func TestE2E_Multicast_ReactivationPreservesAllocations(t *testing.T) {
Expand All @@ -621,9 +612,6 @@ func TestE2E_Multicast_ReactivationPreservesAllocations(t *testing.T) {
Manager: devnet.ManagerSpec{
ServiceabilityProgramKeypairPath: serviceabilityProgramKeypairPath,
},
Activator: devnet.ActivatorSpec{
OnchainAllocation: devnet.BoolPtr(true),
},
}, log, dockerClient, subnetAllocator)
require.NoError(t, err)

Expand Down Expand Up @@ -968,9 +956,6 @@ func TestE2E_LoopbackInterface_AllocationLifecycle(t *testing.T) {
Manager: devnet.ManagerSpec{
ServiceabilityProgramKeypairPath: serviceabilityProgramKeypairPath,
},
Activator: devnet.ActivatorSpec{
OnchainAllocation: devnet.BoolPtr(true),
},
}, log, dockerClient, subnetAllocator)
require.NoError(t, err)

Expand Down
Loading
Loading