Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions src/azure-cli/azure/cli/command_modules/batch/_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -281,6 +281,14 @@ def load_arguments(self, _):
arg_type=get_enum_type(NodeCommunicationMode))
c.extra('enable_accelerated_networking', arg_type=get_three_state_flag(), options_list=['--accelerated-networking'], arg_group="Pool: Network Configuration",
help='Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual- network/accelerated-networking-overview. Set true to enable.')
c.argument('mode', options_list=['--upgrade-policy-mode'], help='The mode of the pool OS upgrade.')
c.argument('disable_automatic_rollback', options_list=['--disable-auto-rollback'], arg_type=get_three_state_flag())
c.argument('enable_automatic_os_upgrade', options_list=['--enable-auto-os-upgrade'], arg_type=get_three_state_flag())
c.argument('os_rolling_upgrade_deferral', options_list=['--defer-os-rolling-upgrade'], arg_type=get_three_state_flag())
c.argument('use_rolling_upgrade_policy', arg_type=get_three_state_flag())
c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag())
c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag())
c.argument('rollback_failed_instances_on_policy_breach', arg_type=get_three_state_flag())
Comment on lines +285 to +291
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why don't you add help messages for these new parameters?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I found that these arguments already have default help messages from our Python SDK, which are good enough, so I'm omitting explicit help text here.


with self.argument_context('batch pool set') as c:
c.argument('target_node_communication_mode', options_list=['--target-communication'],
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
---
# exclusions for the batch module
#
# Each entry below suppresses the CLI linter's `option_length_too_long` rule
# for a `batch pool create` parameter whose auto-generated option name
# exceeds the linter's length limit (these long names mirror the upgrade-
# policy field names exposed by the Batch SDK).

batch pool create:
    parameters:
        enable_cross_zone_upgrade:
            rule_exclusions:
                - option_length_too_long
        max_batch_instance_percent:
            rule_exclusions:
                - option_length_too_long
        max_unhealthy_instance_percent:
            rule_exclusions:
                - option_length_too_long
        max_unhealthy_upgraded_instance_percent:
            rule_exclusions:
                - option_length_too_long
        os_rolling_upgrade_deferral:
            rule_exclusions:
                - option_length_too_long
        pause_time_between_batches:
            rule_exclusions:
                - option_length_too_long
        prioritize_unhealthy_instances:
            rule_exclusions:
                - option_length_too_long
        rollback_failed_instances_on_policy_breach:
            rule_exclusions:
                - option_length_too_long

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -573,7 +573,7 @@ def test_batch_load_arguments(self):
# pylint: disable=too-many-statements
handler = operations._pool_operations.PoolOperations.add
args = list(self.command_pool._load_transformed_arguments(handler))
self.assertEqual(len(args), 38)
self.assertEqual(len(args), 50)
self.assertFalse('yes' in [a for a, _ in args])
self.assertTrue('json_file' in [a for a, _ in args])
self.assertFalse('destination' in [a for a, _ in args])
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,8 @@ def test_batch_pool_cmd(
'--image canonical:ubuntuserver:18.04-lts --node-agent-sku-id "batch.node.ubuntu 18.04" '
'--disk-encryption-targets "TemporaryDisk"')

time.sleep(120)
if self.is_live or self.in_recording:
time.sleep(120)

result = self.batch_cmd('batch pool show --pool-id {p_id}').assert_with_checks([
self.check('allocationState', 'steady'),
Expand Down Expand Up @@ -138,7 +139,7 @@ def test_batch_pool_cmd(
self.check('metadata[1].value', 'd')])

self.batch_cmd('batch pool delete --pool-id {p_id} --yes')

@ResourceGroupPreparer()
@BatchAccountPreparer()
def test_batch_pool_trustedLaunch_cmd(
Expand All @@ -151,7 +152,7 @@ def test_batch_pool_trustedLaunch_cmd(
key = self.get_account_key(
batch_account_name,
resource_group)

self.kwargs.update({
'p_id': 'xplatCreatedPool',
'acc_n': batch_account_name,
Expand All @@ -167,7 +168,7 @@ def test_batch_pool_trustedLaunch_cmd(
'--encryption-at-host true '
'--enable-secure-boot true '
'--enable-vtpm true')

res = self.batch_cmd('batch pool show --pool-id {p_id}').get_output_in_json()

self.assertTrue(res['virtualMachineConfiguration']['securityProfile']['securityType'])
Expand All @@ -189,7 +190,7 @@ def test_batch_pool_osDisk_cmd(
key = self.get_account_key(
batch_account_name,
resource_group)

self.kwargs.update({
'p_id': 'xplatCreatedPool',
'acc_n': batch_account_name,
Expand All @@ -204,7 +205,7 @@ def test_batch_pool_osDisk_cmd(
'--os-disk-size 100 '
'--os-disk-caching ReadWrite '
'--storage-account-type "StandardSSD_LRS" ')

res = self.batch_cmd('batch pool show --pool-id {p_id}').get_output_in_json()
print(res)

Expand All @@ -214,6 +215,62 @@ def test_batch_pool_osDisk_cmd(

self.batch_cmd('batch pool delete --pool-id {p_id} --yes')

@ResourceGroupPreparer()
@BatchAccountPreparer()
def test_batch_pool_upgradePolicy_cmd(
        self,
        resource_group,
        batch_account_name):
    """Create a pool with a full OS upgrade policy and verify that every
    upgrade-policy field round-trips through ``batch pool show``."""
    endpoint = self.get_account_endpoint(
        batch_account_name,
        resource_group).replace("https://", "")
    key = self.get_account_key(
        batch_account_name,
        resource_group)

    self.kwargs.update({
        'p_id': 'xplatCreatedPool',
        'acc_n': batch_account_name,
        'acc_k': key,
        'acc_u': endpoint
    })

    # NOTE(review): '--policy zonal' is presumably required for
    # --enable-cross-zone-upgrade to be meaningful — confirm against service docs.
    self.batch_cmd('batch pool create --id {p_id} --vm-size "standard_d4s_v3" '
                   '--image "MicrosoftWindowsServer:WindowsServer:2016-datacenter-smalldisk" '
                   '--node-agent-sku-id "batch.node.windows amd64" '
                   '--policy "zonal" '
                   '--target-dedicated-nodes 2 '
                   '--upgrade-policy-mode "automatic" '
                   '--disable-auto-rollback '
                   '--enable-auto-os-upgrade '
                   '--defer-os-rolling-upgrade '
                   '--use-rolling-upgrade-policy '
                   '--enable-cross-zone-upgrade '
                   '--max-batch-instance-percent 20 '
                   '--max-unhealthy-instance-percent 20 '
                   '--max-unhealthy-upgraded-instance-percent 20 '
                   '--pause-time-between-batches "PT0S" '
                   '--prioritize-unhealthy-instances '
                   '--rollback-failed-instances-on-policy-breach ')

    res = self.batch_cmd('batch pool show --pool-id {p_id}').get_output_in_json()

    self.assertTrue(res['upgradePolicy']['mode'])
    auto_policy = res['upgradePolicy']['automaticOsUpgradePolicy']
    self.assertTrue(auto_policy['disableAutomaticRollback'])
    self.assertTrue(auto_policy['enableAutomaticOsUpgrade'])
    self.assertTrue(auto_policy['osRollingUpgradeDeferral'])
    self.assertTrue(auto_policy['useRollingUpgradePolicy'])
    rolling_policy = res['upgradePolicy']['rollingUpgradePolicy']
    self.assertTrue(rolling_policy['enableCrossZoneUpgrade'])
    self.assertEqual(rolling_policy['maxBatchInstancePercent'], 20)
    self.assertEqual(rolling_policy['maxUnhealthyInstancePercent'], 20)
    self.assertEqual(rolling_policy['maxUnhealthyUpgradedInstancePercent'], 20)
    # Bug fix: assertTrue(x, '') treated '' as the failure *message* and only
    # checked truthiness; assert the actual requested pause duration instead.
    self.assertEqual(rolling_policy['pauseTimeBetweenBatches'], 'PT0S')
    self.assertTrue(rolling_policy['prioritizeUnhealthyInstances'])
    self.assertTrue(rolling_policy['rollbackFailedInstancesOnPolicyBreach'])

    self.batch_cmd('batch pool delete --pool-id {p_id} --yes')

@ResourceGroupPreparer()
@BatchAccountPreparer()
def test_batch_pool_enableAcceleratedNetworking_cmd(
Expand Down Expand Up @@ -302,7 +359,7 @@ def test_batch_task_create_cmd(self, resource_group, batch_account_name):

task_result = self.batch_cmd('batch job task-counts show --job-id {j_id}').get_output_in_json()
if self.is_live or self.in_recording or task_result["taskCounts"]["active"] == 0:
time.sleep(10)
time.sleep(10)

task_result = self.batch_cmd('batch job task-counts show --job-id {j_id}').get_output_in_json()

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def test_batch_general_arm_cmd(self, resource_group, storage_account):
self.check('encryption.keySource', 'Microsoft.Batch'),
self.check('resourceGroup', '{rg}')])

if self.in_recording:
if self.is_live or self.in_recording:
time.sleep(100)

self.cmd('batch account set -g {rg} -n {acc} --storage-account {str_n}').assert_with_checks([
Expand Down Expand Up @@ -97,7 +97,7 @@ def test_batch_general_arm_cmd(self, resource_group, storage_account):
self.check('[2].category', 'Microsoft Package Repository'),
self.check('[3].category', 'Azure Key Vault'),
self.check('length([0].endpoints)', 2),
self.check('ends_with([0].endpoints[0].domainName, `batch.azure.com`)', True)
self.check('ends_with([0].endpoints[0].domainName, `batch.azure.com`)', True)
])

# test batch account delete
Expand All @@ -106,7 +106,7 @@ def test_batch_general_arm_cmd(self, resource_group, storage_account):
self.cmd('batch account list -g {rg}').assert_with_checks(self.is_empty())

self.cmd('batch location quotas show -l {loc}').assert_with_checks(
[self.check('accountQuota', 3)])
[self.check('accountQuota', 1000)])

self.cmd('batch location list-skus -l {loc} --query "[0:20]"').assert_with_checks([
self.check('length(@)', 20), # Ensure at least 20 entries
Expand All @@ -132,7 +132,7 @@ def test_batch_privateendpoint_cmd(self, resource_group, storage_account):

_, package_file_name = tempfile.mkstemp()


self.kwargs.update({
'rg': resource_group,
'str_n': storage_account,
Expand All @@ -150,15 +150,15 @@ def test_batch_privateendpoint_cmd(self, resource_group, storage_account):
self.check('name', '{acc}'),
self.check('location', '{loc}'),
self.check('resourceGroup', '{rg}')]).get_output_in_json()

self.kwargs['accountId'] = batchaccount['id']

# create private endpoint
self.cmd('network vnet create --resource-group {rg} --name {vnetname} -l {loc}')
self.cmd('network vnet subnet create --resource-group {rg} --name default --vnet-name {vnetname} --address-prefixes 10.0.0.0/24')
self.cmd('network vnet subnet update --name default --resource-group {rg} --vnet-name {vnetname} --disable-private-endpoint-network-policies true')
self.cmd('network private-endpoint create -g {rg} -n {pename} --vnet-name {vnetname} --subnet default --private-connection-resource-id {accountId} --group-id batchAccount --connection-name {pename} -l {loc}')

self.cmd('batch private-link-resource list --account-name {acc} --resource-group {rg}').assert_with_checks([
self.check('length(@)', 2),
self.check('[0].name', 'batchAccount')])
Expand All @@ -181,7 +181,7 @@ def test_batch_network_profile_cmd(self, resource_group, storage_account):

_, package_file_name = tempfile.mkstemp()


self.kwargs.update({
'rg': resource_group,
'str_n': storage_account,
Expand All @@ -199,15 +199,15 @@ def test_batch_network_profile_cmd(self, resource_group, storage_account):
self.check('name', '{acc}'),
self.check('location', '{loc}'),
self.check('resourceGroup', '{rg}')]).get_output_in_json()

self.kwargs['accountId'] = batchaccount['id']

# create private endpoint
output = self.cmd('batch account network-profile network-rule add -n {acc} -g {rg} --profile BatchAccount --ip-address 1.2.3.6').assert_with_checks([
self.check('accountAccess.defaultAction', 'Allow'),
self.check('accountAccess.ipRules[0].value', '1.2.3.6')]).get_output_in_json()


@ResourceGroupPreparer(location='eastus')
@StorageAccountPreparer(location='eastus', name_prefix='clibatchteststor')
def test_batch_managed_identity_cmd(self, resource_group, storage_account):
Expand All @@ -217,7 +217,7 @@ def test_batch_managed_identity_cmd(self, resource_group, storage_account):

_, package_file_name = tempfile.mkstemp()


self.kwargs.update({
'rg': resource_group,
'str_n': storage_account,
Expand Down Expand Up @@ -252,7 +252,7 @@ def test_batch_managed_identity_cmd(self, resource_group, storage_account):
self.check('resourceGroup', '{rg}'),
self.check('identity.type', 'UserAssigned'),
self. check('length(identity.userAssignedIdentities)', 1)]).get_output_in_json()

# display the managed identity
self. cmd('batch account identity show -g {rg} -n {acc}', checks=[
self.check('type', 'UserAssigned')])
Expand All @@ -276,14 +276,14 @@ def test_batch_managed_identity_cmd(self, resource_group, storage_account):
self. cmd('batch account identity assign -g {rg} -n {acc} --user-assigned {identity2_id}', checks=[
self. check('type', 'UserAssigned'),
self. check('length(userAssignedIdentities)', 1)])


@ResourceGroupPreparer(location='eastus')
@StorageAccountPreparer(location='eastus', name_prefix='clibatchteststor')
def test_batch_application_cmd(self, resource_group, storage_account):
account_name = self.create_random_name(prefix='clibatchtestacct', length=24)


_, package_file_name = tempfile.mkstemp()

self.kwargs.update({
Expand Down Expand Up @@ -346,14 +346,14 @@ class BatchMgmtByosScenarioTests(ScenarioTest):

def __init__(self, method_name, *arg, **kwargs):
super().__init__(method_name, *arg, random_config_dir=True, **kwargs)

@ResourceGroupPreparer(location='eastus')
def test_batch_byos_account_cmd(self):
def test_batch_byos_account_cmd(self, resource_group):
account_name = self.create_random_name(prefix='clibatchtestacct', length=24)
kv_name = self.create_random_name('clibatchtestkv', 24)

self.kwargs.update({
'rg': 'clitest.rg6rwx34m4wrz2tr6e52fj4kn6c24jepr5kkrp5kvwsajatmiiynj23s657xjqqubv3',
'rg': resource_group,
'byos_n': account_name,
'byos_l': 'eastus',
'kv': kv_name,
Expand Down Expand Up @@ -382,19 +382,19 @@ def test_batch_byos_account_cmd(self):
self.check('name', '{byos_n}'),
self.check('location', '{byos_l}'),
self.check('resourceGroup', '{rg}')])

# test for resource tags

self.cmd(
'batch account login -g {rg} -n {byos_n}'
)

self.cmd('batch pool create --id xplatCreatedPool --vm-size "standard_d2s_v3" '
'--image "canonical:0001-com-ubuntu-server-focal:20_04-lts" '
'--node-agent-sku-id "batch.node.ubuntu 20.04" '
'--resource-tags "dept=finance env=prod"')


self.cmd('batch pool show --pool-id xplatCreatedPool').assert_with_checks([
self.check('resourceTags.dept', 'finance'),
self.check('resourceTags.env', 'prod'),
Expand Down
Loading