Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions tests/integration-tests/configs/common/common.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,26 @@ networking:
instances: {{ common.INSTANCES_DEFAULT_X86 }}
oss: ["alinux2"]
schedulers: ["slurm", "awsbatch"]
test_security_groups.py::test_additional_sg_and_ssh_from:
dimensions:
- regions: ["eu-north-1"]
instances: {{ common.INSTANCES_DEFAULT_X86 }}
oss: ["centos7"]
schedulers: ["slurm"]
- regions: ["eu-north-1"]
instances: {{ common.INSTANCES_DEFAULT_X86 }}
oss: ["alinux2"]
schedulers: ["awsbatch"]
test_security_groups.py::test_overwrite_sg:
dimensions:
- regions: ["eu-north-1"]
instances: {{ common.INSTANCES_DEFAULT_X86 }}
oss: ["centos7"]
schedulers: ["slurm"]
- regions: ["eu-north-1"]
instances: {{ common.INSTANCES_DEFAULT_X86 }}
oss: ["alinux2"]
schedulers: ["awsbatch"]
scaling:
test_scaling.py::test_hit_scaling:
dimensions:
Expand Down
2 changes: 1 addition & 1 deletion tests/integration-tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -617,7 +617,7 @@ def vpc_stacks(cfn_stacks_factory, request):
return vpc_stacks


@pytest.fixture()
@pytest.fixture(scope="class")
def vpc_stack(vpc_stacks, region):
    """Return the VPC stack for the current test region.

    Class-scoped so all tests in the same class share one VPC stack lookup;
    the stacks themselves are created by the session-level ``vpc_stacks`` fixture.
    """
    return vpc_stacks[region]

Expand Down
20 changes: 8 additions & 12 deletions tests/integration-tests/tests/dcv/test_dcv.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,16 @@
import os as operating_system
import re

import boto3
import pytest
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from utils import add_keys_to_known_hosts, get_username_for_os, remove_keys_from_known_hosts, run_command
from utils import (
add_keys_to_known_hosts,
check_headnode_security_group,
get_username_for_os,
remove_keys_from_known_hosts,
run_command,
)

from tests.cloudwatch_logging.test_cloudwatch_logging import FeatureSpecificCloudWatchLoggingTestRunner

Expand Down Expand Up @@ -106,7 +111,7 @@ def _test_dcv_configuration(
remote_command_executor = RemoteCommandExecutor(cluster)

# check configuration parameters
_check_security_group(region, cluster, dcv_port, expected_cidr=access_from)
check_headnode_security_group(region, cluster, dcv_port, expected_cidr=access_from)

# dcv connect show url
env = operating_system.environ.copy()
Expand Down Expand Up @@ -198,15 +203,6 @@ def _check_auth_ok(remote_command_executor, external_authenticator_port, session
).is_equal_to('<auth result="yes"><username>{0}</username></auth>'.format(username))


def _check_security_group(region, cluster, port, expected_cidr):
    """Assert that the head node security group restricts ``port`` to ``expected_cidr``.

    :param region: AWS region of the cluster.
    :param cluster: cluster object exposing ``cfn_resources`` (CloudFormation resources).
    :param port: ingress port whose rule is checked (e.g. the DCV port).
    :param expected_cidr: CIDR expected on the first IP range of the matching rule.
    """
    security_group_id = cluster.cfn_resources.get("MasterSecurityGroup")
    response = boto3.client("ec2", region_name=region).describe_security_groups(GroupIds=[security_group_id])

    # Find the first ingress permission whose FromPort matches; default to {} so a
    # missing rule fails the assertion below (as a KeyError) rather than StopIteration.
    ips = response["SecurityGroups"][0]["IpPermissions"]
    target = next(filter(lambda x: x.get("FromPort", -1) == port, ips), {})
    assert_that(target["IpRanges"][0]["CidrIp"]).is_equal_to(expected_cidr)


def _check_no_crashes(remote_command_executor, test_datadir):
    """Verify no core files in /var/crash, which on ubuntu18 causes a popup when logging into the 1st session."""
    verification_script = test_datadir / "verify_no_core_files.sh"
    remote_command_executor.run_remote_script(str(verification_script))
152 changes: 152 additions & 0 deletions tests/integration-tests/tests/networking/test_security_groups.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging

import boto3
import pytest
from assertpy import assert_that
from cfn_stacks_factory import CfnStack
from troposphere import Ref, Template
from troposphere.ec2 import SecurityGroup, SecurityGroupIngress
from utils import check_headnode_security_group, random_alphanumeric


@pytest.mark.usefixtures("os", "scheduler", "instance")
def test_additional_sg_and_ssh_from(region, custom_security_group, pcluster_config_reader, clusters_factory):
    """
    Test when additional_sg and ssh_from are provided in the config file.

    The additional security group should be attached to both the head node and the
    compute node without replacing the default ParallelCluster security groups, and
    the ssh_from CIDR should restrict port 22 on the head node security group.
    """
    custom_security_group_id = custom_security_group.cfn_resources["SecurityGroupResource"]
    ssh_from = "10.11.12.0/32"
    cluster_config = pcluster_config_reader(additional_sg=custom_security_group_id, ssh_from=ssh_from)
    cluster = clusters_factory(cluster_config)
    ec2_client = boto3.client("ec2", region_name=region)
    instances = _get_instances_by_security_group(ec2_client, custom_security_group_id)
    logging.info("Asserting that head node and compute node have the additional security group")
    # One head node + one compute node are expected (initial size 1 in the config template).
    assert_that(instances).is_length(2)
    logging.info("Asserting the security group of pcluster is not overwritten by additional security group")
    for instance in instances:
        # Every instance must still carry at least one default "parallelcluster*" group.
        assert_that(
            any(
                security_group["GroupName"].startswith("parallelcluster")
                for security_group in instance["SecurityGroups"]
            )
        ).is_true()
    logging.info("Asserting the security group of pcluster on the head node is aligned with ssh_from")
    check_headnode_security_group(region, cluster, 22, ssh_from)


@pytest.mark.usefixtures("os", "scheduler", "instance")
def test_overwrite_sg(region, custom_security_group, pcluster_config_reader, clusters_factory):
    """Test vpc_security_group_id overwrites pcluster default sg on head and compute nodes, efs, fsx"""
    custom_security_group_id = custom_security_group.cfn_resources["SecurityGroupResource"]
    cluster_config = pcluster_config_reader(vpc_security_group_id=custom_security_group_id)
    cluster = clusters_factory(cluster_config)
    ec2_client = boto3.client("ec2", region_name=region)
    instances = _get_instances_by_security_group(ec2_client, custom_security_group_id)
    logging.info("Asserting that head node and compute node has and only has the custom security group")
    # One head node + one compute node are expected (initial size 1 in the config template).
    assert_that(instances).is_length(2)
    for instance in instances:
        assert_that(instance["SecurityGroups"]).is_length(1)

    cfn_client = boto3.client("cloudformation", region_name=region)

    logging.info("Collecting security groups of the FSx")
    fsx_id = cfn_client.describe_stack_resource(
        StackName=cluster.cfn_resources["FSXSubstack"], LogicalResourceId="FileSystem"
    )["StackResourceDetail"]["PhysicalResourceId"]
    fsx_client = boto3.client("fsx", region_name=region)
    network_interface_id = fsx_client.describe_file_systems(FileSystemIds=[fsx_id])["FileSystems"][0][
        "NetworkInterfaceIds"
    ][0]
    fsx_security_groups = ec2_client.describe_network_interfaces(NetworkInterfaceIds=[network_interface_id])[
        "NetworkInterfaces"
    ][0]["Groups"]
    logging.info("Asserting the network interface of FSx has and only has the custom security group")
    # Check the length first so an empty group list fails with a clear assertion
    # error instead of an IndexError on the [0] access.
    assert_that(fsx_security_groups).is_length(1)
    assert_that(fsx_security_groups[0]["GroupId"]).is_equal_to(custom_security_group_id)

    logging.info("Collecting security groups of the EFS")
    efs_id = cfn_client.describe_stack_resource(
        StackName=cluster.cfn_resources["EFSSubstack"], LogicalResourceId="EFSFS"
    )["StackResourceDetail"]["PhysicalResourceId"]
    efs_client = boto3.client("efs", region_name=region)
    mount_target_ids = [
        mount_target["MountTargetId"]
        for mount_target in efs_client.describe_mount_targets(FileSystemId=efs_id)["MountTargets"]
    ]
    logging.info("Asserting the mount targets of EFS has and only has the custom security group")
    for mount_target_id in mount_target_ids:
        mount_target_security_groups = efs_client.describe_mount_target_security_groups(MountTargetId=mount_target_id)[
            "SecurityGroups"
        ]
        # Length check before indexing, for the same clear-failure reason as the FSx check above.
        assert_that(mount_target_security_groups).is_length(1)
        assert_that(mount_target_security_groups[0]).is_equal_to(custom_security_group_id)


@pytest.fixture(scope="class")
def custom_security_group(vpc_stack, region, request, cfn_stacks_factory):
    """Create a self-referencing security group in the test VPC and yield its CFN stack."""
    cfn_template = Template()
    cfn_template.set_version("2010-09-09")
    cfn_template.set_description("custom security group stack for testing additional_sg and vpc_security_group_id")

    # The security group itself, with no rules attached at creation time.
    sg_resource = cfn_template.add_resource(
        SecurityGroup(
            "SecurityGroupResource",
            GroupDescription="custom security group for testing additional_sg and vpc_security_group_id",
            VpcId=vpc_stack.cfn_outputs["VpcId"],
        )
    )
    # Allow all traffic between members of the group (self-referencing ingress rule).
    ingress_rule = SecurityGroupIngress(
        "SecurityGroupIngressResource",
        IpProtocol="-1",
        FromPort=0,
        ToPort=65535,
        SourceSecurityGroupId=Ref(sg_resource),
        GroupId=Ref(sg_resource),
    )
    cfn_template.add_resource(ingress_rule)

    suffix = request.config.getoption("stackname_suffix")
    stack_name = "integ-tests-custom-sg-{0}{1}{2}".format(random_alphanumeric(), "-" if suffix else "", suffix)
    sg_stack = CfnStack(name=stack_name, region=region, template=cfn_template.to_json())
    cfn_stacks_factory.create_stack(sg_stack)

    yield sg_stack

    # Keep the stack around for debugging when --no-delete is passed.
    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(sg_stack.name, region)


def _get_instances_by_security_group(ec2_client, security_group_id):
logging.info("Collecting security groups of the head node and compute node")
paginator = ec2_client.get_paginator("describe_instances")
page_iterator = paginator.paginate(
Filters=[
{
"Name": "network-interface.group-id",
"Values": [security_group_id],
}
]
)
instances = []
for page in page_iterator:
for reservation in page["Reservations"]:
instances.extend(reservation["Instances"])
return instances
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
[global]
cluster_template = default

[aws]
aws_region_name = {{ region }}

[cluster default]
base_os = {{ os }}
key_name = {{ key_name }}
vpc_settings = parallelcluster-vpc
scheduler = {{ scheduler }}
master_instance_type = {{ instance }}
compute_instance_type = {{ instance }}
{% if scheduler == "awsbatch" %}
min_vcpus = 1
desired_vcpus = 1
{% else %}
initial_queue_size = 1
maintain_initial_size = true
{% endif %}

[vpc parallelcluster-vpc]
vpc_id = {{ vpc_id }}
master_subnet_id = {{ public_subnet_id }}
compute_subnet_id = {{ private_additional_cidr_subnet_id }}
additional_sg = {{ additional_sg }}
use_public_ips = false
ssh_from = {{ ssh_from }}
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
[global]
cluster_template = default

[aws]
aws_region_name = {{ region }}

[cluster default]
base_os = {{ os }}
key_name = {{ key_name }}
vpc_settings = parallelcluster-vpc
scheduler = {{ scheduler }}
master_instance_type = {{ instance }}
compute_instance_type = {{ instance }}
{% if scheduler == "awsbatch" %}
min_vcpus = 1
desired_vcpus = 1
{% else %}
initial_queue_size = 1
maintain_initial_size = true
{% endif %}
efs_settings = parallelcluster-efs
fsx_settings = parallelcluster-fsx

[vpc parallelcluster-vpc]
vpc_id = {{ vpc_id }}
master_subnet_id = {{ public_subnet_id }}
compute_subnet_id = {{ private_additional_cidr_subnet_id }}
vpc_security_group_id = {{ vpc_security_group_id }}
use_public_ips = false

[efs parallelcluster-efs]
shared_dir = efs

[fsx parallelcluster-fsx]
shared_dir = fsx
storage_capacity = 1200
deployment_type = SCRATCH_2
10 changes: 10 additions & 0 deletions tests/integration-tests/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -362,3 +362,13 @@ def get_architecture_supported_by_instance_type(instance_type, region_name=None)
assert_that(len(instance_architectures)).is_equal_to(1)

return instance_architectures[0]


def check_headnode_security_group(region, cluster, port, expected_cidr):
    """Check CIDR restriction for a port is in the security group of the head node of the cluster"""
    head_node_sg_id = cluster.cfn_resources.get("MasterSecurityGroup")
    ec2_client = boto3.client("ec2", region_name=region)
    describe_response = ec2_client.describe_security_groups(GroupIds=[head_node_sg_id])

    # Locate the first ingress permission matching the port; fall back to an empty
    # dict so a missing rule surfaces as an assertion/KeyError failure below.
    matching_rule = {}
    for permission in describe_response["SecurityGroups"][0]["IpPermissions"]:
        if permission.get("FromPort", -1) == port:
            matching_rule = permission
            break
    assert_that(matching_rule["IpRanges"][0]["CidrIp"]).is_equal_to(expected_cidr)