removing __init__.py from azure-mgmt-batchai/tests to fix loading errors (#2820)
schaabs authored and lmazuel committed Jun 27, 2018
1 parent 1aae6df commit b52e0cf
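
Why removing tests/__init__.py addresses the loading errors (a hedged reading, since the commit message is terse): with an __init__.py present, the tests directory is a Python package, so its modules reach helpers through the relative "from . import helpers"; once the file is gone, each test module is loaded as a plain top-level module and "from helpers import Helpers" resolves against the tests directory on sys.path. The sketch below demonstrates only that general Python behavior; the directory layout and values are illustrative, not code from this commit.

import subprocess
import sys
import tempfile
from pathlib import Path

# Illustrative only (not from this commit): a throwaway tests/ directory with
# no __init__.py, mirroring the import style this change adopts.
with tempfile.TemporaryDirectory() as root:
    tests = Path(root) / "tests"
    tests.mkdir()
    (tests / "helpers.py").write_text("class Helpers:\n    LOCATION = 'somewhere'\n")
    (tests / "test_ok.py").write_text("from helpers import Helpers\nprint(Helpers.LOCATION)\n")
    (tests / "test_bad.py").write_text("from . import helpers\n")

    # The absolute import works: the interpreter puts the script's own
    # directory on sys.path before running it.
    ok = subprocess.run([sys.executable, str(tests / "test_ok.py")],
                        capture_output=True, text=True)
    print(ok.stdout.strip())  # -> somewhere

    # The relative import fails outside a package, which is why the diffs
    # below rewrite every "from . import helpers".
    bad = subprocess.run([sys.executable, str(tests / "test_bad.py")],
                         capture_output=True, text=True)
    print(bad.stderr.strip().splitlines()[-1])
    # -> ImportError: attempted relative import with no known parent package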
Showing 8 changed files with 721 additions and 716 deletions.
azure-mgmt-batchai/tests/__init__.py — deleted (empty file).
927 changes: 466 additions & 461 deletions azure-mgmt-batchai/tests/helpers.py

Large diffs are not rendered by default.
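
The helpers.py diff is collapsed above, but the call sites in the test diffs below reveal its new shape: constants and helper functions that used to live at module level (helpers.LOCATION, helpers.create_batchai_client, ...) are now attributes of a Helpers class. A hypothetical skeleton, inferred purely from those call sites — every value and body here is a placeholder, not the shipped implementation:

# Hypothetical skeleton of the refactored azure-mgmt-batchai/tests/helpers.py,
# inferred from call sites in the diffs below; all values and bodies are
# placeholders, not the real code.
class Helpers:
    LOCATION = 'eastus'                          # placeholder region
    FAKE_STORAGE = None                          # fake resource used in playback mode
    DEFAULT_WORKSPACE_NAME = 'workspace'         # placeholder name
    NODE_STARTUP_TIMEOUT_SEC = 20 * 60           # placeholder timeout
    AUTO_SCALE_TIMEOUT_SEC = 30 * 60             # placeholder timeout
    MINUTE = 60
    STANDARD_OUTPUT_DIRECTORY_ID = 'stdouterr'   # placeholder id
    ADMIN_USER_NAME = 'demoUser'                 # placeholder credential
    ADMIN_USER_PASSWORD = 'Dem0Pa$$w0rd'         # placeholder credential

    @staticmethod
    def create_batchai_client(test_case):
        """Build a BatchAIManagementClient for the given test case."""
        raise NotImplementedError  # real logic lives in helpers.py

    @staticmethod
    def wait_for_nodes(is_live, client, group_name, cluster_name, target, timeout_sec):
        """Poll until the cluster reports `target` ready nodes; return the count."""
        raise NotImplementedError

Hanging everything off one class means each test module only needs the single top-level name Helpers, which keeps the no-package import surface minimal — a plausible motivation, though nothing on this page confirms it.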

94 changes: 47 additions & 47 deletions azure-mgmt-batchai/tests/test_mgmt_batchai_clusters.py
@@ -12,17 +12,17 @@
 from devtools_testutils import ResourceGroupPreparer
 from devtools_testutils import StorageAccountPreparer
 
-from . import helpers
+from helpers import Helpers
 
 
 class ClusterTestCase(AzureMgmtTestCase):
     def setUp(self):
         super(ClusterTestCase, self).setUp()
-        self.client = helpers.create_batchai_client(self)  # type: BatchAIManagementClient
+        self.client = Helpers.create_batchai_client(self)  # type: BatchAIManagementClient
         self.cluster_name = self.get_resource_name('cluster')
 
-    @ResourceGroupPreparer(location=helpers.LOCATION)
-    @StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
+    @ResourceGroupPreparer(location=Helpers.LOCATION)
+    @StorageAccountPreparer(name_prefix='psdk', location=Helpers.LOCATION, playback_fake_resource=Helpers.FAKE_STORAGE)
     def test_creation_and_deletion(self, resource_group, location, storage_account, storage_account_key):
         """Tests basic use-case scenario.
@@ -31,7 +31,7 @@ def test_creation_and_deletion(self, resource_group, location, storage_account, storage_account_key):
         3. Execute a task in a docker container
         4. Delete cluster
         """
-        cluster = helpers.create_cluster(
+        cluster = Helpers.create_cluster(
             self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 1,
             storage_account.name, storage_account_key)

@@ -40,42 +40,42 @@ def test_creation_and_deletion(self, resource_group, location, storage_account, storage_account_key):
         self.assertEqual(cluster.vm_size, 'STANDARD_D1')
 
         # Verify that the cluster is reported in the list of clusters
-        helpers.assert_existing_clusters_are(self, self.client, resource_group.name, [self.cluster_name])
+        Helpers.assert_existing_clusters_are(self, self.client, resource_group.name, [self.cluster_name])
 
         # Verify that one node is allocated and become available
         self.assertEqual(
-            helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
-                                   helpers.NODE_STARTUP_TIMEOUT_SEC), 1)
-        helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
+            Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
+                                   Helpers.NODE_STARTUP_TIMEOUT_SEC), 1)
+        Helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
                                                             self.cluster_name, 1)
 
         # Verify that the cluster able to run tasks.
         self.assertCanRunJobOnHost(resource_group, location, cluster.id)
         self.assertCanRunJobInContainer(resource_group, location, cluster.id)
 
         # Test cluster deletion
-        self.client.clusters.delete(resource_group.name, helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
-        helpers.assert_existing_clusters_are(self, self.client, resource_group.name, [])
+        self.client.clusters.delete(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
+        Helpers.assert_existing_clusters_are(self, self.client, resource_group.name, [])
 
-    @ResourceGroupPreparer(location=helpers.LOCATION)
-    @StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
+    @ResourceGroupPreparer(location=Helpers.LOCATION)
+    @StorageAccountPreparer(name_prefix='psdk', location=Helpers.LOCATION, playback_fake_resource=Helpers.FAKE_STORAGE)
     def test_setup_task_execution(self, resource_group, location, storage_account, storage_account_key):
         """Tests setup task execution.
         """
-        cluster = helpers.create_cluster(
+        cluster = Helpers.create_cluster(
             self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 1,
             storage_account.name, storage_account_key,
             setup_task_cmd='echo $GREETING $SECRET_GREETING',
             setup_task_env={'GREETING': 'setup task'},
             setup_task_secrets={'SECRET_GREETING': 'has a secret'})  # type: models.Cluster
 
         # Verify that the cluster is reported in the list of clusters
-        helpers.assert_existing_clusters_are(self, self.client, resource_group.name, [self.cluster_name])
+        Helpers.assert_existing_clusters_are(self, self.client, resource_group.name, [self.cluster_name])
 
         # Verify that one node is allocated and become available
         self.assertEqual(
-            helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
-                                   helpers.NODE_STARTUP_TIMEOUT_SEC), 1)
+            Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
+                                   Helpers.NODE_STARTUP_TIMEOUT_SEC), 1)
 
         # Check that server doesn't return values for secrets
         self.assertEqual(len(cluster.node_setup.setup_task.secrets), 1)
@@ -84,95 +84,95 @@ def test_setup_task_execution(self, resource_group, location, storage_account, storage_account_key):
         # Verify that the setup task is completed by checking generated output. BatchAI reports a path which was auto-
         # generated for storing setup output logs.
         setup_task_output_path = cluster.node_setup.setup_task.std_out_err_path_suffix
-        nodes = helpers.get_node_ids(self.client, resource_group.name, self.cluster_name)
+        nodes = Helpers.get_node_ids(self.client, resource_group.name, self.cluster_name)
         self.assertEqual(len(nodes), 1)
         node_id = nodes[0]
-        helpers.assert_file_in_file_share(self, storage_account.name, storage_account_key,
+        Helpers.assert_file_in_file_share(self, storage_account.name, storage_account_key,
                                           setup_task_output_path,
                                           'stdout-{0}.txt'.format(node_id),
                                           u'setup task has a secret\n')
-        helpers.assert_file_in_file_share(self, storage_account.name, storage_account_key,
+        Helpers.assert_file_in_file_share(self, storage_account.name, storage_account_key,
                                           setup_task_output_path, 'stderr-{0}.txt'.format(node_id), u'')
-        self.client.clusters.delete(resource_group.name, helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
+        self.client.clusters.delete(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
 
-    @ResourceGroupPreparer(location=helpers.LOCATION)
-    @StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
+    @ResourceGroupPreparer(location=Helpers.LOCATION)
+    @StorageAccountPreparer(name_prefix='psdk', location=Helpers.LOCATION, playback_fake_resource=Helpers.FAKE_STORAGE)
     def test_cluster_resizing(self, resource_group, location, storage_account, storage_account_key):
         """Tests manual cluster resizing"""
-        cluster = helpers.create_cluster(
+        cluster = Helpers.create_cluster(
             self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 1,
             storage_account.name, storage_account_key)
 
         # Verify that one node is allocated and become available
         self.assertEqual(
-            helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
-                                   helpers.NODE_STARTUP_TIMEOUT_SEC), 1)
-        helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
+            Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
+                                   Helpers.NODE_STARTUP_TIMEOUT_SEC), 1)
+        Helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
                                                             self.cluster_name, 1)
 
         self.assertCanResizeCluster(resource_group, 0)
         self.assertCanResizeCluster(resource_group, 1)
 
         # Verify that cluster able to run tasks after resizing.
         self.assertCanRunJobOnHost(resource_group, location, cluster.id)
-        self.client.clusters.delete(resource_group.name, helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
+        self.client.clusters.delete(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
 
-    @ResourceGroupPreparer(location=helpers.LOCATION)
-    @StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
+    @ResourceGroupPreparer(location=Helpers.LOCATION)
+    @StorageAccountPreparer(name_prefix='psdk', location=Helpers.LOCATION, playback_fake_resource=Helpers.FAKE_STORAGE)
     def test_auto_scaling(self, resource_group, location, storage_account, storage_account_key):
         """Tests auto-scaling"""
         # Create the cluster with no nodes.
-        cluster = helpers.create_cluster(
+        cluster = Helpers.create_cluster(
             self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 0,
             storage_account.name, storage_account_key)
 
         # Switch the cluster into auto-scale mode
-        self.client.clusters.update(resource_group.name, helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name,
+        self.client.clusters.update(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name,
                                     scale_settings=models.ScaleSettings(
                                         auto_scale=models.AutoScaleSettings(
                                             minimum_node_count=0,
                                             maximum_node_count=1)))
 
         # Submit a task. BatchAI must increase the number of nodes to execute the task.
-        self.assertCanRunJobOnHost(resource_group, location, cluster.id, timeout_sec=helpers.AUTO_SCALE_TIMEOUT_SEC)
+        self.assertCanRunJobOnHost(resource_group, location, cluster.id, timeout_sec=Helpers.AUTO_SCALE_TIMEOUT_SEC)
 
         # Verify that cluster downsized to zero since there are no more jobs for it
         self.assertEqual(
-            helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 0,
-                                   helpers.NODE_STARTUP_TIMEOUT_SEC), 0)
-        self.client.clusters.delete(resource_group.name, helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
+            Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 0,
+                                   Helpers.NODE_STARTUP_TIMEOUT_SEC), 0)
+        self.client.clusters.delete(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
 
-    def assertCanRunJobInContainer(self, resource_group, location, cluster_id, timeout_sec=helpers.MINUTE):
+    def assertCanRunJobInContainer(self, resource_group, location, cluster_id, timeout_sec=Helpers.MINUTE):
         self.assertCanRunJob(resource_group, location, cluster_id, 'container_job',
                              models.ContainerSettings(image_source_registry=models.ImageSourceRegistry(image="ubuntu")),
                              timeout_sec)
 
-    def assertCanRunJobOnHost(self, resource_group, location, cluster_id, timeout_sec=helpers.MINUTE):
+    def assertCanRunJobOnHost(self, resource_group, location, cluster_id, timeout_sec=Helpers.MINUTE):
         self.assertCanRunJob(resource_group, location, cluster_id, 'host_job', None, timeout_sec)
 
     def assertCanRunJob(self, resource_group, location, cluster_id, job_name, container_settings, timeout_sec):
-        helpers.create_custom_job(self.client, resource_group.name, cluster_id, job_name, 1,
+        Helpers.create_custom_job(self.client, resource_group.name, cluster_id, job_name, 1,
                                   'echo hello | tee $AZ_BATCHAI_OUTPUT_OUTPUTS/hi.txt', container=container_settings)
 
         # Verify if the job finishes reasonably fast.
         self.assertEqual(
-            helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job_name, timeout_sec),
+            Helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job_name, timeout_sec),
             models.ExecutionState.succeeded)
 
         # Verify if output files and standard output files are available and contain expected greeting.
-        helpers.assert_job_files_are(self, self.client, resource_group.name, job_name, 'OUTPUTS',
+        Helpers.assert_job_files_are(self, self.client, resource_group.name, job_name, 'OUTPUTS',
                                      {u'hi.txt': u'hello\n'})
-        helpers.assert_job_files_are(self, self.client, resource_group.name, job_name,
-                                     helpers.STANDARD_OUTPUT_DIRECTORY_ID,
+        Helpers.assert_job_files_are(self, self.client, resource_group.name, job_name,
+                                     Helpers.STANDARD_OUTPUT_DIRECTORY_ID,
                                      {u'stdout.txt': u'hello\n', u'stderr.txt': ''})
 
     def assertCanResizeCluster(self, resource_group, target):
-        self.client.clusters.update(resource_group.name, helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name,
+        self.client.clusters.update(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name,
                                     scale_settings=models.ScaleSettings(
                                         manual=models.ManualScaleSettings(target_node_count=target)))
         self.assertEqual(
-            helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, target,
-                                   helpers.NODE_STARTUP_TIMEOUT_SEC),
+            Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, target,
+                                   Helpers.NODE_STARTUP_TIMEOUT_SEC),
             target)
-        helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
+        Helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
                                                             self.cluster_name, target)
12 changes: 6 additions & 6 deletions azure-mgmt-batchai/tests/test_mgmt_batchai_experiments.py
@@ -10,15 +10,15 @@
 from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
 from msrestazure.azure_exceptions import CloudError
 
-from . import helpers
+from helpers import Helpers
 
 
 class ExperimentTestCase(AzureMgmtTestCase):
     def setUp(self):
         super(ExperimentTestCase, self).setUp()
-        self.client = helpers.create_batchai_client(self)  # type: BatchAIManagementClient
+        self.client = Helpers.create_batchai_client(self)  # type: BatchAIManagementClient
 
-    @ResourceGroupPreparer(location=helpers.LOCATION)
+    @ResourceGroupPreparer(location=Helpers.LOCATION)
     def test_creation_and_deletion(self, resource_group, location):
         name = 'testee'
         workspace_name = 'workspace'
@@ -37,7 +37,7 @@ def test_creation_and_deletion(self, resource_group, location):
         # Check the experiment is actually deleted
         self.assertRaises(CloudError, lambda: self.client.experiments.get(resource_group.name, workspace_name, name))
 
-    @ResourceGroupPreparer(location=helpers.LOCATION)
+    @ResourceGroupPreparer(location=Helpers.LOCATION)
     def test_experiments_isolation(self, resource_group, location):
         self.client.workspaces.create(resource_group.name, 'first', location).result()
         self.client.workspaces.create(resource_group.name, 'second', location).result()
@@ -52,8 +52,8 @@ def test_experiments_isolation(self, resource_group, location):
                 scale_settings=models.ScaleSettings(
                     manual=models.ManualScaleSettings(target_node_count=0)),
                 user_account_settings=models.UserAccountSettings(
-                    admin_user_name=helpers.ADMIN_USER_NAME,
-                    admin_user_password=helpers.ADMIN_USER_PASSWORD
+                    admin_user_name=Helpers.ADMIN_USER_NAME,
+                    admin_user_password=Helpers.ADMIN_USER_PASSWORD
                 ),
                 vm_priority='lowpriority'
             )).result()
