diff --git a/monitoring/snippets/v3/alerts-client/snippets.py b/monitoring/snippets/v3/alerts-client/snippets.py
index 80254232e6af..fa9c2d9fb54b 100644
--- a/monitoring/snippets/v3/alerts-client/snippets.py
+++ b/monitoring/snippets/v3/alerts-client/snippets.py
@@ -18,28 +18,40 @@
 import json
 import os
 
+import google.api_core.exceptions
 from google.cloud import monitoring_v3
-import google.protobuf.json_format
+from google.protobuf import field_mask_pb2 as field_mask
+import proto
 import tabulate
 
 
 # [START monitoring_alert_list_policies]
 def list_alert_policies(project_name):
     client = monitoring_v3.AlertPolicyServiceClient()
-    policies = client.list_alert_policies(project_name)
-    print(tabulate.tabulate(
-        [(policy.name, policy.display_name) for policy in policies],
-        ('name', 'display_name')))
+    policies = client.list_alert_policies(name=project_name)
+    print(
+        tabulate.tabulate(
+            [(policy.name, policy.display_name) for policy in policies],
+            ("name", "display_name"),
+        )
+    )
+
+
 # [END monitoring_alert_list_policies]
 
 
 # [START monitoring_alert_list_channels]
 def list_notification_channels(project_name):
     client = monitoring_v3.NotificationChannelServiceClient()
-    channels = client.list_notification_channels(project_name)
-    print(tabulate.tabulate(
-        [(channel.name, channel.display_name) for channel in channels],
-        ('name', 'display_name')))
+    channels = client.list_notification_channels(name=project_name)
+    print(
+        tabulate.tabulate(
+            [(channel.name, channel.display_name) for channel in channels],
+            ("name", "display_name"),
+        )
+    )
+
+
 # [END monitoring_alert_list_channels]
@@ -56,37 +68,50 @@ def enable_alert_policies(project_name, enable, filter_=None):
     """
     client = monitoring_v3.AlertPolicyServiceClient()
-    policies = client.list_alert_policies(project_name, filter_=filter_)
+    policies = client.list_alert_policies(
+        request={"name": project_name, "filter": filter_}
+    )
 
     for policy in policies:
-        if bool(enable) == policy.enabled.value:
-            print('Policy', policy.name, 'is already',
-                  'enabled' if policy.enabled.value else 'disabled')
+        if bool(enable) == policy.enabled:
+            print(
+                "Policy",
+                policy.name,
+                "is already",
+                "enabled" if policy.enabled else "disabled",
+            )
         else:
-            policy.enabled.value = bool(enable)
-            mask = monitoring_v3.types.field_mask_pb2.FieldMask()
-            mask.paths.append('enabled')
-            client.update_alert_policy(policy, mask)
-            print('Enabled' if enable else 'Disabled', policy.name)
+            policy.enabled = bool(enable)
+            mask = field_mask.FieldMask()
+            mask.paths.append("enabled")
+            client.update_alert_policy(alert_policy=policy, update_mask=mask)
+            print("Enabled" if enable else "Disabled", policy.name)
+
+
 # [END monitoring_alert_enable_policies]
 
 
 # [START monitoring_alert_replace_channels]
 def replace_notification_channels(project_name, alert_policy_id, channel_ids):
-    _, project_id = project_name.split('/')
+    _, project_id = project_name.split("/")
     alert_client = monitoring_v3.AlertPolicyServiceClient()
     channel_client = monitoring_v3.NotificationChannelServiceClient()
-    policy = monitoring_v3.types.alert_pb2.AlertPolicy()
+    policy = monitoring_v3.AlertPolicy()
     policy.name = alert_client.alert_policy_path(project_id, alert_policy_id)
 
     for channel_id in channel_ids:
         policy.notification_channels.append(
-            channel_client.notification_channel_path(project_id, channel_id))
+            channel_client.notification_channel_path(project_id, channel_id)
+        )
+
+    mask = field_mask.FieldMask()
+    mask.paths.append("notification_channels")
+    updated_policy = alert_client.update_alert_policy(
+        alert_policy=policy, update_mask=mask
+    )
+    print("Updated", updated_policy.name)
+
 
-    mask = monitoring_v3.types.field_mask_pb2.FieldMask()
-    mask.paths.append('notification_channels')
-    updated_policy = alert_client.update_alert_policy(policy, mask)
-    print('Updated', updated_policy.name)
 # [END monitoring_alert_replace_channels]
@@ -94,16 +119,16 @@ def replace_notification_channels(project_name, alert_policy_id, channel_ids):
 def delete_notification_channels(project_name, channel_ids, force=None):
     channel_client = monitoring_v3.NotificationChannelServiceClient()
     for channel_id in channel_ids:
-        channel_name = '{}/notificationChannels/{}'.format(
-            project_name, channel_id)
+        channel_name = "{}/notificationChannels/{}".format(project_name, channel_id)
         try:
-            channel_client.delete_notification_channel(
-                channel_name, force=force)
-            print('Channel {} deleted'.format(channel_name))
+            channel_client.delete_notification_channel(name=channel_name, force=force)
+            print("Channel {} deleted".format(channel_name))
         except ValueError:
-            print('The parameters are invalid')
+            print("The parameters are invalid")
         except Exception as e:
-            print('API call failed: {}'.format(e))
+            print("API call failed: {}".format(e))
+
+
 # [END monitoring_alert_delete_channel]
@@ -111,25 +136,29 @@ def delete_notification_channels(project_name, channel_ids, force=None):
 def backup(project_name, backup_filename):
     alert_client = monitoring_v3.AlertPolicyServiceClient()
     channel_client = monitoring_v3.NotificationChannelServiceClient()
-    record = {'project_name': project_name,
-              'policies': list(alert_client.list_alert_policies(project_name)),
-              'channels': list(channel_client.list_notification_channels(
-                  project_name))}
-    json.dump(record, open(backup_filename, 'wt'), cls=ProtoEncoder, indent=2)
-    print('Backed up alert policies and notification channels to {}.'.format(
-        backup_filename)
+    record = {
+        "project_name": project_name,
+        "policies": list(alert_client.list_alert_policies(name=project_name)),
+        "channels": list(channel_client.list_notification_channels(name=project_name)),
+    }
+    json.dump(record, open(backup_filename, "wt"), cls=ProtoEncoder, indent=2)
+    print(
+        "Backed up alert policies and notification channels to {}.".format(
+            backup_filename
+        )
     )
 
 
 class ProtoEncoder(json.JSONEncoder):
-    """Uses google.protobuf.json_format to encode protobufs as json."""
+    """Encode protobufs as json."""
+
     def default(self, obj):
-        if type(obj) in (monitoring_v3.types.alert_pb2.AlertPolicy,
-                         monitoring_v3.types.notification_pb2.
-                         NotificationChannel):
-            text = google.protobuf.json_format.MessageToJson(obj)
+        if type(obj) in (monitoring_v3.AlertPolicy, monitoring_v3.NotificationChannel):
+            text = proto.Message.to_json(obj)
             return json.loads(text)
         return super(ProtoEncoder, self).default(obj)
+
+
 # [END monitoring_alert_backup_policies]
@@ -139,21 +168,25 @@ def default(self, obj):
 # [START monitoring_alert_update_channel]
 # [START monitoring_alert_enable_channel]
 def restore(project_name, backup_filename):
-    print('Loading alert policies and notification channels from {}.'.format(
-        backup_filename)
+    print(
+        "Loading alert policies and notification channels from {}.".format(
+            backup_filename
+        )
     )
-    record = json.load(open(backup_filename, 'rt'))
-    is_same_project = project_name == record['project_name']
+    record = json.load(open(backup_filename, "rt"))
+    is_same_project = project_name == record["project_name"]
     # Convert dicts to AlertPolicies.
-    policies_json = [json.dumps(policy) for policy in record['policies']]
-    policies = [google.protobuf.json_format.Parse(
-        policy_json, monitoring_v3.types.alert_pb2.AlertPolicy())
-        for policy_json in policies_json]
+    policies_json = [json.dumps(policy) for policy in record["policies"]]
+    policies = [
+        monitoring_v3.AlertPolicy.from_json(policy_json)
+        for policy_json in policies_json
+    ]
     # Convert dicts to NotificationChannels
-    channels_json = [json.dumps(channel) for channel in record['channels']]
-    channels = [google.protobuf.json_format.Parse(
-        channel_json, monitoring_v3.types.notification_pb2.
-        NotificationChannel()) for channel_json in channels_json]
+    channels_json = [json.dumps(channel) for channel in record["channels"]]
+    channels = [
+        monitoring_v3.NotificationChannel.from_json(channel_json)
+        for channel_json in channels_json
+    ]
 
     # Restore the channels.
     channel_client = monitoring_v3.NotificationChannelServiceClient()
@@ -161,16 +194,17 @@ def restore(project_name, backup_filename):
 
     for channel in channels:
         updated = False
-        print('Updating channel', channel.display_name)
+        print("Updating channel", channel.display_name)
 
         # This field is immutable and it is illegal to specify a
         # non-default value (UNVERIFIED or VERIFIED) in the
         # Create() or Update() operations.
-        channel.verification_status = monitoring_v3.enums.NotificationChannel.\
-            VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED
+        channel.verification_status = (
+            monitoring_v3.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED
+        )
 
         if is_same_project:
             try:
-                channel_client.update_notification_channel(channel)
+                channel_client.update_notification_channel(notification_channel=channel)
                 updated = True
             except google.api_core.exceptions.NotFound:
                 pass  # The channel was deleted. Create it below.
@@ -178,19 +212,20 @@ def restore(project_name, backup_filename):
         if not updated:
             # The channel no longer exists. Recreate it.
             old_name = channel.name
-            channel.ClearField("name")
+            del channel.name
             new_channel = channel_client.create_notification_channel(
-                project_name, channel)
+                name=project_name, notification_channel=channel
+            )
             channel_name_map[old_name] = new_channel.name
 
     # Restore the alerts
     alert_client = monitoring_v3.AlertPolicyServiceClient()
 
     for policy in policies:
-        print('Updating policy', policy.display_name)
+        print("Updating policy", policy.display_name)
 
         # These two fields cannot be set directly, so clear them.
-        policy.ClearField('creation_record')
-        policy.ClearField('mutation_record')
+        del policy.creation_record
+        del policy.mutation_record
 
         # Update old channel names with new channel names.
         for i, channel in enumerate(policy.notification_channels):
@@ -202,7 +237,7 @@ def restore(project_name, backup_filename):
 
         if is_same_project:
             try:
-                alert_client.update_alert_policy(policy)
+                alert_client.update_alert_policy(alert_policy=policy)
                 updated = True
             except google.api_core.exceptions.NotFound:
                 pass  # The policy was deleted. Create it below.
@@ -214,11 +249,15 @@ def restore(project_name, backup_filename):
         if not updated:
             # The policy no longer exists. Recreate it.
             old_name = policy.name
-            policy.ClearField("name")
+            del policy.name
             for condition in policy.conditions:
-                condition.ClearField("name")
-            policy = alert_client.create_alert_policy(project_name, policy)
-        print('Updated', policy.name)
+                del condition.name
+            policy = alert_client.create_alert_policy(
+                name=project_name, alert_policy=policy
+            )
+        print("Updated", policy.name)
+
+
 # [END monitoring_alert_enable_channel]
 # [END monitoring_alert_restore_policies]
 # [END monitoring_alert_create_policy]
@@ -239,105 +278,87 @@ def project_id():
     Returns:
         str -- the project name
     """
-    project_id = os.environ['GOOGLE_CLOUD_PROJECT']
+    project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
     if not project_id:
         raise MissingProjectIdError(
-            'Set the environment variable ' +
-            'GCLOUD_PROJECT to your Google Cloud Project Id.')
+            "Set the environment variable "
+            + "GOOGLE_CLOUD_PROJECT to your Google Cloud Project Id."
+        )
     return project_id
 
 
 def project_name():
-    return 'projects/' + project_id()
+    return "projects/" + project_id()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description='Demonstrates AlertPolicy API operations.')
+        description="Demonstrates AlertPolicy API operations."
+    )
 
-    subparsers = parser.add_subparsers(dest='command')
+    subparsers = parser.add_subparsers(dest="command")
 
     list_alert_policies_parser = subparsers.add_parser(
-        'list-alert-policies',
-        help=list_alert_policies.__doc__
+        "list-alert-policies", help=list_alert_policies.__doc__
     )
 
     list_notification_channels_parser = subparsers.add_parser(
-        'list-notification-channels',
-        help=list_alert_policies.__doc__
+        "list-notification-channels", help=list_notification_channels.__doc__
     )
 
     enable_alert_policies_parser = subparsers.add_parser(
-        'enable-alert-policies',
-        help=enable_alert_policies.__doc__
+        "enable-alert-policies", help=enable_alert_policies.__doc__
     )
     enable_alert_policies_parser.add_argument(
-        '--filter',
+        "--filter",
    )
 
     disable_alert_policies_parser = subparsers.add_parser(
-        'disable-alert-policies',
-        help=enable_alert_policies.__doc__
+        "disable-alert-policies", help=enable_alert_policies.__doc__
    )
     disable_alert_policies_parser.add_argument(
-        '--filter',
+        "--filter",
    )
 
     replace_notification_channels_parser = subparsers.add_parser(
-        'replace-notification-channels',
-        help=replace_notification_channels.__doc__
+        "replace-notification-channels", help=replace_notification_channels.__doc__
    )
     replace_notification_channels_parser.add_argument(
-        '-p', '--alert_policy_id',
-        required=True
+        "-p", "--alert_policy_id", required=True
    )
     replace_notification_channels_parser.add_argument(
-        '-c', '--notification_channel_id',
-        required=True,
-        action='append'
+        "-c", "--notification_channel_id", required=True, action="append"
    )
 
-    backup_parser = subparsers.add_parser(
-        'backup',
-        help=backup.__doc__
-    )
-    backup_parser.add_argument(
-        '--backup_to_filename',
-        required=True
-    )
+    backup_parser = subparsers.add_parser("backup", help=backup.__doc__)
+    backup_parser.add_argument("--backup_to_filename", required=True)
 
-    restore_parser = subparsers.add_parser(
-        'restore',
-        help=restore.__doc__
-    )
-    restore_parser.add_argument(
-        '--restore_from_filename',
-        required=True
-    )
+    restore_parser = subparsers.add_parser("restore", help=restore.__doc__)
+    restore_parser.add_argument("--restore_from_filename", required=True)
 
     args = parser.parse_args()
 
-    if args.command == 'list-alert-policies':
+    if args.command == "list-alert-policies":
         list_alert_policies(project_name())
 
-    elif args.command == 'list-notification-channels':
+    elif args.command == "list-notification-channels":
         list_notification_channels(project_name())
 
-    elif args.command == 'enable-alert-policies':
+    elif args.command == "enable-alert-policies":
         enable_alert_policies(project_name(), enable=True, filter_=args.filter)
 
-    elif args.command == 'disable-alert-policies':
-        enable_alert_policies(project_name(), enable=False,
-                              filter_=args.filter)
+    elif args.command == "disable-alert-policies":
        enable_alert_policies(project_name(), enable=False, filter_=args.filter)
 
-    elif args.command == 'replace-notification-channels':
-        replace_notification_channels(project_name(), args.alert_policy_id,
-                                      args.notification_channel_id)
+    elif args.command == "replace-notification-channels":
+        replace_notification_channels(
+            project_name(), args.alert_policy_id, args.notification_channel_id
+        )
 
-    elif args.command == 'backup':
+    elif args.command == "backup":
         backup(project_name(), args.backup_to_filename)
 
-    elif args.command == 'restore':
+    elif args.command == "restore":
         restore(project_name(), args.restore_from_filename)
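
Note on the calling convention used throughout this migration: the generated 2.x clients accept either flattened keyword arguments or a single `request` mapping, but not both in one call. A minimal sketch of the two equivalent styles, assuming google-cloud-monitoring >= 2.0, application-default credentials, and a placeholder project ID (the filter string is illustrative):

    from google.cloud import monitoring_v3

    client = monitoring_v3.AlertPolicyServiceClient()

    # Flattened keyword argument, as in list_alert_policies above.
    for policy in client.list_alert_policies(name="projects/my-project"):
        print(policy.display_name)

    # Equivalent request mapping, as in enable_alert_policies above.
    pager = client.list_alert_policies(
        request={"name": "projects/my-project", "filter": 'display_name="My Policy"'}
    )
    for policy in pager:
        print(policy.display_name)
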
diff --git a/monitoring/snippets/v3/alerts-client/snippets_test.py b/monitoring/snippets/v3/alerts-client/snippets_test.py
index 550a8dc97596..ca90a1709feb 100644
--- a/monitoring/snippets/v3/alerts-client/snippets_test.py
+++ b/monitoring/snippets/v3/alerts-client/snippets_test.py
@@ -23,7 +23,6 @@
 from google.api_core.exceptions import NotFound
 from google.api_core.exceptions import ServiceUnavailable
 from google.cloud import monitoring_v3
-import google.protobuf.json_format
 import pytest
 from retrying import retry
 
@@ -35,13 +34,11 @@
 
 
 def random_name(length):
-    return ''.join(
-        [random.choice(string.ascii_lowercase) for i in range(length)])
+    return "".join([random.choice(string.ascii_lowercase) for i in range(length)])
 
 
 def retry_on_exceptions(exception):
-    return isinstance(
-        exception, (Aborted, ServiceUnavailable, DeadlineExceeded))
+    return isinstance(exception, (Aborted, ServiceUnavailable, DeadlineExceeded))
 
 
 def delay_on_aborted(err, *args):
@@ -54,7 +51,7 @@ def delay_on_aborted(err, *args):
 
 class PochanFixture:
     """A test fixture that creates an alert POlicy and a notification CHANnel,
-       hence the name, pochan.
+    hence the name, pochan.
     """
 
     def __init__(self):
@@ -62,55 +59,64 @@ def __init__(self):
         self.project_name = snippets.project_name()
         self.alert_policy_client = monitoring_v3.AlertPolicyServiceClient()
         self.notification_channel_client = (
-            monitoring_v3.NotificationChannelServiceClient())
+            monitoring_v3.NotificationChannelServiceClient()
+        )
 
     def __enter__(self):
-        @retry(wait_exponential_multiplier=1000, wait_exponential_max=10000,
-               stop_max_attempt_number=10,
-               retry_on_exception=retry_on_exceptions)
+        @retry(
+            wait_exponential_multiplier=1000,
+            wait_exponential_max=10000,
+            stop_max_attempt_number=10,
+            retry_on_exception=retry_on_exceptions,
+        )
         def setup():
             # Create a policy.
-            policy = monitoring_v3.types.alert_pb2.AlertPolicy()
-            json = open('test_alert_policy.json').read()
-            google.protobuf.json_format.Parse(json, policy)
-            policy.display_name = 'snippets-test-' + random_name(10)
+            json = open("test_alert_policy.json").read()
+            policy = monitoring_v3.AlertPolicy.from_json(json)
+            policy.display_name = "snippets-test-" + random_name(10)
             self.alert_policy = self.alert_policy_client.create_alert_policy(
-                self.project_name, policy)
+                name=self.project_name, alert_policy=policy
+            )
             # Create a notification channel.
-            notification_channel = (
-                monitoring_v3.types.notification_pb2.NotificationChannel())
-            json = open('test_notification_channel.json').read()
-            google.protobuf.json_format.Parse(json, notification_channel)
-            notification_channel.display_name = (
-                'snippets-test-' + random_name(10))
+            json = open("test_notification_channel.json").read()
+            notification_channel = monitoring_v3.NotificationChannel.from_json(json)
+            notification_channel.display_name = "snippets-test-" + random_name(10)
             self.notification_channel = (
                 self.notification_channel_client.create_notification_channel(
-                    self.project_name, notification_channel))
+                    name=self.project_name, notification_channel=notification_channel
+                )
+            )
+
         setup()
         return self
 
     def __exit__(self, type, value, traceback):
        # Delete the policy and channel we created.
-        @retry(wait_exponential_multiplier=1000, wait_exponential_max=10000,
-               stop_max_attempt_number=10,
-               retry_on_exception=retry_on_exceptions)
+        @retry(
+            wait_exponential_multiplier=1000,
+            wait_exponential_max=10000,
+            stop_max_attempt_number=10,
+            retry_on_exception=retry_on_exceptions,
+        )
         def teardown():
             try:
                 self.alert_policy_client.delete_alert_policy(
-                    self.alert_policy.name)
+                    name=self.alert_policy.name
+                )
             except NotFound:
                 print("Ignored NotFound when deleting a policy.")
             try:
                 if self.notification_channel.name:
-                    self.notification_channel_client\
-                        .delete_notification_channel(
-                            self.notification_channel.name)
+                    self.notification_channel_client.delete_notification_channel(
+                        self.notification_channel.name
+                    )
             except NotFound:
                 print("Ignored NotFound when deleting a channel.")
+
         teardown()
 
 
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
 def pochan():
     with PochanFixture() as pochan:
         yield pochan
@@ -132,20 +138,24 @@ def test_enable_alert_policies(capsys, pochan):
     time.sleep(2)
     snippets.enable_alert_policies(pochan.project_name, True)
     out, _ = capsys.readouterr()
-    assert "Enabled {0}".format(pochan.project_name) in out \
+    assert (
+        "Enabled {0}".format(pochan.project_name) in out
         or "{} is already enabled".format(pochan.alert_policy.name) in out
+    )
 
     time.sleep(2)
     snippets.enable_alert_policies(pochan.project_name, False)
     out, _ = capsys.readouterr()
-    assert "Disabled {}".format(pochan.project_name) in out \
+    assert (
+        "Disabled {}".format(pochan.project_name) in out
         or "{} is already disabled".format(pochan.alert_policy.name) in out
+    )
 
 
 @pytest.mark.flaky(rerun_filter=delay_on_aborted, max_runs=5)
 def test_replace_channels(capsys, pochan):
-    alert_policy_id = pochan.alert_policy.name.split('/')[-1]
-    notification_channel_id = pochan.notification_channel.name.split('/')[-1]
+    alert_policy_id = pochan.alert_policy.name.split("/")[-1]
+    notification_channel_id = pochan.notification_channel.name.split("/")[-1]
 
     # This sleep call is for mitigating the following error:
     # "409 Too many concurrent edits to the project configuration.
@@ -154,7 +164,8 @@ def test_replace_channels(capsys, pochan):
     # See also #3310
     time.sleep(2)
     snippets.replace_notification_channels(
-        pochan.project_name, alert_policy_id, [notification_channel_id])
+        pochan.project_name, alert_policy_id, [notification_channel_id]
+    )
     out, _ = capsys.readouterr()
     assert "Updated {0}".format(pochan.alert_policy.name) in out
 
@@ -167,20 +178,21 @@ def test_backup_and_restore(capsys, pochan):
     # Having multiple projects will void this `sleep()` call.
     # See also #3310
     time.sleep(2)
-    snippets.backup(pochan.project_name, 'backup.json')
+    snippets.backup(pochan.project_name, "backup.json")
     out, _ = capsys.readouterr()
 
     time.sleep(2)
-    snippets.restore(pochan.project_name, 'backup.json')
+    snippets.restore(pochan.project_name, "backup.json")
     out, _ = capsys.readouterr()
     assert "Updated {0}".format(pochan.alert_policy.name) in out
-    assert "Updating channel {0}".format(
-        pochan.notification_channel.display_name) in out
+    assert (
+        "Updating channel {0}".format(pochan.notification_channel.display_name) in out
+    )
 
 
 @pytest.mark.flaky(rerun_filter=delay_on_aborted, max_runs=5)
 def test_delete_channels(capsys, pochan):
-    notification_channel_id = pochan.notification_channel.name.split('/')[-1]
+    notification_channel_id = pochan.notification_channel.name.split("/")[-1]
 
     # This sleep call is for mitigating the following error:
     # "409 Too many concurrent edits to the project configuration.
@@ -189,7 +201,8 @@ def test_delete_channels(capsys, pochan):
     # See also #3310
     time.sleep(2)
     snippets.delete_notification_channels(
-        pochan.project_name, [notification_channel_id], force=True)
+        pochan.project_name, [notification_channel_id], force=True
+    )
     out, _ = capsys.readouterr()
     assert "{0} deleted".format(notification_channel_id) in out
-    pochan.notification_channel.name = ''  # So teardown is not tried
+    pochan.notification_channel.name = ""  # So teardown is not tried
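
The fixture above and the backup/restore path in snippets.py both rely on proto-plus JSON round-tripping rather than google.protobuf.json_format. A minimal sketch of that round trip (the display name is illustrative):

    import proto
    from google.cloud import monitoring_v3

    policy = monitoring_v3.AlertPolicy(display_name="example-policy")

    # Serialize the proto-plus message to a JSON string...
    text = proto.Message.to_json(policy)

    # ...then parse it back into a typed message.
    restored = monitoring_v3.AlertPolicy.from_json(text)
    assert restored.display_name == "example-policy"
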
+ project_name = f"projects/{project}" - series = monitoring_v3.types.TimeSeries() - series.metric.type = 'custom.googleapis.com/my_metric' - series.resource.type = 'gce_instance' - series.resource.labels['instance_id'] = '1234567890123456789' - series.resource.labels['zone'] = 'us-central1-f' - point = series.points.add() - point.value.double_value = 3.14 + series = monitoring_v3.TimeSeries() + series.metric.type = "custom.googleapis.com/my_metric" + series.resource.type = "gce_instance" + series.resource.labels["instance_id"] = "1234567890123456789" + series.resource.labels["zone"] = "us-central1-f" now = time.time() - point.interval.end_time.seconds = int(now) - point.interval.end_time.nanos = int( - (now - point.interval.end_time.seconds) * 10**9) - client.create_time_series(project_name, [series]) - print('Successfully wrote time series.') + seconds = int(now) + nanos = int((now - seconds) * 10 ** 9) + interval = monitoring_v3.TimeInterval( + {"end_time": {"seconds": seconds, "nanos": nanos}} + ) + point = monitoring_v3.Point({"interval": interval, "value": {"double_value": 3.14}}) + series.points = [point] + client.create_time_series(request={"name": project_name, "time_series": [series]}) + print("Successfully wrote time series.") # [END monitoring_quickstart] -if __name__ == '__main__': +if __name__ == "__main__": run_quickstart() diff --git a/monitoring/snippets/v3/cloud-client/quickstart_test.py b/monitoring/snippets/v3/cloud-client/quickstart_test.py index fd0191aafc12..d7826e92ce5d 100644 --- a/monitoring/snippets/v3/cloud-client/quickstart_test.py +++ b/monitoring/snippets/v3/cloud-client/quickstart_test.py @@ -15,32 +15,18 @@ import os import backoff -import mock -import pytest import quickstart -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] -@pytest.fixture -def mock_project_path(): - """Mock out project and replace with project from environment.""" - project_patch = mock.patch( - 'google.cloud.monitoring_v3.MetricServiceClient.' - 'project_path') - - with project_patch as project_mock: - project_mock.return_value = 'projects/{}'.format(PROJECT) - yield project_mock - - -def test_quickstart(capsys, mock_project_path): +def test_quickstart(capsys): @backoff.on_exception(backoff.expo, AssertionError, max_time=60) def eventually_consistent_test(): - quickstart.run_quickstart() + quickstart.run_quickstart(PROJECT) out, _ = capsys.readouterr() - assert 'wrote' in out + assert "wrote" in out eventually_consistent_test() diff --git a/monitoring/snippets/v3/cloud-client/snippets.py b/monitoring/snippets/v3/cloud-client/snippets.py index 64b3853fd7cc..1c0407a2bc02 100644 --- a/monitoring/snippets/v3/cloud-client/snippets.py +++ b/monitoring/snippets/v3/cloud-client/snippets.py @@ -18,72 +18,83 @@ import time import uuid +from google.api import metric_pb2 as ga_metric from google.cloud import monitoring_v3 -PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT'] +PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"] def create_metric_descriptor(project_id): # [START monitoring_create_metric] client = monitoring_v3.MetricServiceClient() - project_name = client.project_path(project_id) - descriptor = monitoring_v3.types.MetricDescriptor() - descriptor.type = 'custom.googleapis.com/my_metric' + str(uuid.uuid4()) - descriptor.metric_kind = ( - monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE) - descriptor.value_type = ( - monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE) - descriptor.description = 'This is a simple example of a custom metric.' 
diff --git a/monitoring/snippets/v3/cloud-client/snippets.py b/monitoring/snippets/v3/cloud-client/snippets.py
index 64b3853fd7cc..1c0407a2bc02 100644
--- a/monitoring/snippets/v3/cloud-client/snippets.py
+++ b/monitoring/snippets/v3/cloud-client/snippets.py
@@ -18,72 +18,83 @@
 import time
 import uuid
 
+from google.api import metric_pb2 as ga_metric
 from google.cloud import monitoring_v3
 
-PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT']
+PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
 
 
 def create_metric_descriptor(project_id):
     # [START monitoring_create_metric]
     client = monitoring_v3.MetricServiceClient()
-    project_name = client.project_path(project_id)
-    descriptor = monitoring_v3.types.MetricDescriptor()
-    descriptor.type = 'custom.googleapis.com/my_metric' + str(uuid.uuid4())
-    descriptor.metric_kind = (
-        monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)
-    descriptor.value_type = (
-        monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)
-    descriptor.description = 'This is a simple example of a custom metric.'
-    descriptor = client.create_metric_descriptor(project_name, descriptor)
-    print('Created {}.'.format(descriptor.name))
+    project_name = f"projects/{project_id}"
+    descriptor = ga_metric.MetricDescriptor()
+    descriptor.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4())
+    descriptor.metric_kind = ga_metric.MetricDescriptor.MetricKind.GAUGE
+    descriptor.value_type = ga_metric.MetricDescriptor.ValueType.DOUBLE
+    descriptor.description = "This is a simple example of a custom metric."
+    descriptor = client.create_metric_descriptor(
+        name=project_name, metric_descriptor=descriptor
+    )
+    print("Created {}.".format(descriptor.name))
     # [END monitoring_create_metric]
 
 
 def delete_metric_descriptor(descriptor_name):
     # [START monitoring_delete_metric]
     client = monitoring_v3.MetricServiceClient()
-    client.delete_metric_descriptor(descriptor_name)
-    print('Deleted metric descriptor {}.'.format(descriptor_name))
+    client.delete_metric_descriptor(name=descriptor_name)
+    print("Deleted metric descriptor {}.".format(descriptor_name))
     # [END monitoring_delete_metric]
 
 
 def write_time_series(project_id):
     # [START monitoring_write_timeseries]
     client = monitoring_v3.MetricServiceClient()
-    project_name = client.project_path(project_id)
-
-    series = monitoring_v3.types.TimeSeries()
-    series.metric.type = 'custom.googleapis.com/my_metric' + str(uuid.uuid4())
-    series.resource.type = 'gce_instance'
-    series.resource.labels['instance_id'] = '1234567890123456789'
-    series.resource.labels['zone'] = 'us-central1-f'
-    point = series.points.add()
-    point.value.double_value = 3.14
+    project_name = f"projects/{project_id}"
+
+    series = monitoring_v3.TimeSeries()
+    series.metric.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4())
+    series.resource.type = "gce_instance"
+    series.resource.labels["instance_id"] = "1234567890123456789"
+    series.resource.labels["zone"] = "us-central1-f"
     now = time.time()
-    point.interval.end_time.seconds = int(now)
-    point.interval.end_time.nanos = int(
-        (now - point.interval.end_time.seconds) * 10**9)
-    client.create_time_series(project_name, [series])
+    seconds = int(now)
+    nanos = int((now - seconds) * 10 ** 9)
+    interval = monitoring_v3.TimeInterval(
+        {"end_time": {"seconds": seconds, "nanos": nanos}}
+    )
+    point = monitoring_v3.Point({"interval": interval, "value": {"double_value": 3.14}})
+    series.points = [point]
+    client.create_time_series(name=project_name, time_series=[series])
     # [END monitoring_write_timeseries]
 
 
 def list_time_series(project_id):
     # [START monitoring_read_timeseries_simple]
     client = monitoring_v3.MetricServiceClient()
-    project_name = client.project_path(project_id)
-    interval = monitoring_v3.types.TimeInterval()
+    project_name = f"projects/{project_id}"
+
     now = time.time()
-    interval.end_time.seconds = int(now)
-    interval.end_time.nanos = int(
-        (now - interval.end_time.seconds) * 10**9)
-    interval.start_time.seconds = int(now - 1200)
-    interval.start_time.nanos = interval.end_time.nanos
+    seconds = int(now)
+    nanos = int((now - seconds) * 10 ** 9)
+    interval = monitoring_v3.TimeInterval(
+        {
+            "end_time": {"seconds": seconds, "nanos": nanos},
+            "start_time": {"seconds": (seconds - 1200), "nanos": nanos},
+        }
+    )
+
     results = client.list_time_series(
-        project_name,
-        'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
-        interval,
-        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL)
+        request={
+            "name": project_name,
+            "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
+            "interval": interval,
+            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
+        }
+    )
     for result in results:
         print(result)
     # [END monitoring_read_timeseries_simple]
@@ -92,19 +103,24 @@ def list_time_series(project_id):
 def list_time_series_header(project_id):
     # [START monitoring_read_timeseries_fields]
     client = monitoring_v3.MetricServiceClient()
-    project_name = client.project_path(project_id)
-    interval = monitoring_v3.types.TimeInterval()
+    project_name = f"projects/{project_id}"
     now = time.time()
-    interval.end_time.seconds = int(now)
-    interval.end_time.nanos = int(
-        (now - interval.end_time.seconds) * 10**9)
-    interval.start_time.seconds = int(now - 1200)
-    interval.start_time.nanos = interval.end_time.nanos
+    seconds = int(now)
+    nanos = int((now - seconds) * 10 ** 9)
+    interval = monitoring_v3.TimeInterval(
+        {
+            "end_time": {"seconds": seconds, "nanos": nanos},
+            "start_time": {"seconds": (seconds - 1200), "nanos": nanos},
+        }
+    )
     results = client.list_time_series(
-        project_name,
-        'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
-        interval,
-        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS)
+        request={
+            "name": project_name,
+            "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
+            "interval": interval,
+            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.HEADERS,
+        }
+    )
     for result in results:
         print(result)
     # [END monitoring_read_timeseries_fields]
@@ -113,25 +129,33 @@ def list_time_series_header(project_id):
 def list_time_series_aggregate(project_id):
     # [START monitoring_read_timeseries_align]
     client = monitoring_v3.MetricServiceClient()
-    project_name = client.project_path(project_id)
-    interval = monitoring_v3.types.TimeInterval()
+    project_name = f"projects/{project_id}"
+
     now = time.time()
-    interval.end_time.seconds = int(now)
-    interval.end_time.nanos = int(
-        (now - interval.end_time.seconds) * 10**9)
-    interval.start_time.seconds = int(now - 3600)
-    interval.start_time.nanos = interval.end_time.nanos
-    aggregation = monitoring_v3.types.Aggregation()
-    aggregation.alignment_period.seconds = 1200  # 20 minutes
-    aggregation.per_series_aligner = (
-        monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN)
+    seconds = int(now)
+    nanos = int((now - seconds) * 10 ** 9)
+    interval = monitoring_v3.TimeInterval(
+        {
+            "end_time": {"seconds": seconds, "nanos": nanos},
+            "start_time": {"seconds": (seconds - 3600), "nanos": nanos},
+        }
+    )
+    aggregation = monitoring_v3.Aggregation(
+        {
+            "alignment_period": {"seconds": 1200},  # 20 minutes
+            "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN,
+        }
+    )
+
     results = client.list_time_series(
-        project_name,
-        'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
-        interval,
-        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
-        aggregation)
+        request={
+            "name": project_name,
+            "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
+            "interval": interval,
+            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
+            "aggregation": aggregation,
+        }
+    )
     for result in results:
         print(result)
     # [END monitoring_read_timeseries_align]
@@ -140,28 +164,35 @@ def list_time_series_aggregate(project_id):
 def list_time_series_reduce(project_id):
     # [START monitoring_read_timeseries_reduce]
     client = monitoring_v3.MetricServiceClient()
-    project_name = client.project_path(project_id)
-    interval = monitoring_v3.types.TimeInterval()
+    project_name = f"projects/{project_id}"
+
     now = time.time()
-    interval.end_time.seconds = int(now)
-    interval.end_time.nanos = int(
-        (now - interval.end_time.seconds) * 10**9)
-    interval.start_time.seconds = int(now - 3600)
-    interval.start_time.nanos = interval.end_time.nanos
-    aggregation = monitoring_v3.types.Aggregation()
-    aggregation.alignment_period.seconds = 1200  # 20 minutes
-    aggregation.per_series_aligner = (
-        monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN)
-    aggregation.cross_series_reducer = (
-        monitoring_v3.enums.Aggregation.Reducer.REDUCE_MEAN)
-    aggregation.group_by_fields.append('resource.zone')
+    seconds = int(now)
+    nanos = int((now - seconds) * 10 ** 9)
+    interval = monitoring_v3.TimeInterval(
+        {
+            "end_time": {"seconds": seconds, "nanos": nanos},
+            "start_time": {"seconds": (seconds - 3600), "nanos": nanos},
+        }
+    )
+    aggregation = monitoring_v3.Aggregation(
+        {
+            "alignment_period": {"seconds": 1200},  # 20 minutes
+            "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN,
+            "cross_series_reducer": monitoring_v3.Aggregation.Reducer.REDUCE_MEAN,
+            "group_by_fields": ["resource.zone"],
+        }
+    )
+
     results = client.list_time_series(
-        project_name,
-        'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
-        interval,
-        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
-        aggregation)
+        request={
+            "name": project_name,
+            "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
+            "interval": interval,
+            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
+            "aggregation": aggregation,
+        }
+    )
     for result in results:
         print(result)
     # [END monitoring_read_timeseries_reduce]
@@ -170,8 +201,8 @@ def list_time_series_reduce(project_id):
 def list_metric_descriptors(project_id):
     # [START monitoring_list_descriptors]
     client = monitoring_v3.MetricServiceClient()
-    project_name = client.project_path(project_id)
-    for descriptor in client.list_metric_descriptors(project_name):
+    project_name = f"projects/{project_id}"
+    for descriptor in client.list_metric_descriptors(name=project_name):
         print(descriptor.type)
     # [END monitoring_list_descriptors]
@@ -179,9 +210,8 @@ def list_metric_descriptors(project_id):
 def list_monitored_resources(project_id):
     # [START monitoring_list_resources]
     client = monitoring_v3.MetricServiceClient()
-    project_name = client.project_path(project_id)
-    resource_descriptors = (
-        client.list_monitored_resource_descriptors(project_name))
+    project_name = f"projects/{project_id}"
+    resource_descriptors = client.list_monitored_resource_descriptors(name=project_name)
     for descriptor in resource_descriptors:
         print(descriptor.type)
     # [END monitoring_list_resources]
@@ -190,121 +220,109 @@ def get_monitored_resource_descriptor(project_id, resource_type_name):
     # [START monitoring_get_resource]
     client = monitoring_v3.MetricServiceClient()
-    resource_path = client.monitored_resource_descriptor_path(
-        project_id, resource_type_name)
-    pprint.pprint(client.get_monitored_resource_descriptor(resource_path))
+    resource_path = (
+        f"projects/{project_id}/monitoredResourceDescriptors/{resource_type_name}"
+    )
+    pprint.pprint(client.get_monitored_resource_descriptor(name=resource_path))
     # [END monitoring_get_resource]
 
 
 def get_metric_descriptor(metric_name):
     # [START monitoring_get_descriptor]
     client = monitoring_v3.MetricServiceClient()
-    descriptor = client.get_metric_descriptor(metric_name)
+    descriptor = client.get_metric_descriptor(name=metric_name)
     pprint.pprint(descriptor)
     # [END monitoring_get_descriptor]
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description='Demonstrates Monitoring API operations.')
+        description="Demonstrates Monitoring API operations."
+    )
 
-    subparsers = parser.add_subparsers(dest='command')
+    subparsers = parser.add_subparsers(dest="command")
 
     create_metric_descriptor_parser = subparsers.add_parser(
-        'create-metric-descriptor',
-        help=create_metric_descriptor.__doc__
+        "create-metric-descriptor", help=create_metric_descriptor.__doc__
     )
 
     list_metric_descriptor_parser = subparsers.add_parser(
-        'list-metric-descriptors',
-        help=list_metric_descriptors.__doc__
+        "list-metric-descriptors", help=list_metric_descriptors.__doc__
     )
 
     get_metric_descriptor_parser = subparsers.add_parser(
-        'get-metric-descriptor',
-        help=get_metric_descriptor.__doc__
+        "get-metric-descriptor", help=get_metric_descriptor.__doc__
     )
     get_metric_descriptor_parser.add_argument(
-        '--metric-type-name',
-        help='The metric type of the metric descriptor to see details about.',
-        required=True
+        "--metric-type-name",
+        help="The metric type of the metric descriptor to see details about.",
+        required=True,
     )
 
     delete_metric_descriptor_parser = subparsers.add_parser(
-        'delete-metric-descriptor',
-        help=list_metric_descriptors.__doc__
+        "delete-metric-descriptor", help=delete_metric_descriptor.__doc__
     )
     delete_metric_descriptor_parser.add_argument(
-        '--metric-descriptor-name',
-        help='Metric descriptor to delete',
-        required=True
+        "--metric-descriptor-name", help="Metric descriptor to delete", required=True
     )
 
     list_resources_parser = subparsers.add_parser(
-        'list-resources',
-        help=list_monitored_resources.__doc__
+        "list-resources", help=list_monitored_resources.__doc__
     )
 
     get_resource_parser = subparsers.add_parser(
-        'get-resource',
-        help=get_monitored_resource_descriptor.__doc__
+        "get-resource", help=get_monitored_resource_descriptor.__doc__
     )
     get_resource_parser.add_argument(
-        '--resource-type-name',
-        help='Monitored resource to view more information about.',
-        required=True
+        "--resource-type-name",
+        help="Monitored resource to view more information about.",
+        required=True,
     )
 
     write_time_series_parser = subparsers.add_parser(
-        'write-time-series',
-        help=write_time_series.__doc__
+        "write-time-series", help=write_time_series.__doc__
     )
 
     list_time_series_parser = subparsers.add_parser(
-        'list-time-series',
-        help=list_time_series.__doc__
+        "list-time-series", help=list_time_series.__doc__
     )
 
     list_time_series_header_parser = subparsers.add_parser(
-        'list-time-series-header',
-        help=list_time_series_header.__doc__
+        "list-time-series-header", help=list_time_series_header.__doc__
     )
 
     read_time_series_reduce = subparsers.add_parser(
-        'list-time-series-reduce',
-        help=list_time_series_reduce.__doc__
+        "list-time-series-reduce", help=list_time_series_reduce.__doc__
     )
 
     read_time_series_aggregate = subparsers.add_parser(
-        'list-time-series-aggregate',
-        help=list_time_series_aggregate.__doc__
+        "list-time-series-aggregate", help=list_time_series_aggregate.__doc__
     )
 
     args = parser.parse_args()
 
-    if args.command == 'create-metric-descriptor':
+    if args.command == "create-metric-descriptor":
         create_metric_descriptor(PROJECT_ID)
-    if args.command == 'list-metric-descriptors':
+    if args.command == "list-metric-descriptors":
         list_metric_descriptors(PROJECT_ID)
-    if args.command == 'get-metric-descriptor':
+    if args.command == "get-metric-descriptor":
         get_metric_descriptor(args.metric_type_name)
-    if args.command == 'delete-metric-descriptor':
+    if args.command == "delete-metric-descriptor":
         delete_metric_descriptor(args.metric_descriptor_name)
-    if args.command == 'list-resources':
+    if args.command == "list-resources":
         list_monitored_resources(PROJECT_ID)
-    if args.command == 'get-resource':
-        get_monitored_resource_descriptor(
-            PROJECT_ID, args.resource_type_name)
-    if args.command == 'write-time-series':
+    if args.command == "get-resource":
+        get_monitored_resource_descriptor(PROJECT_ID, args.resource_type_name)
+    if args.command == "write-time-series":
         write_time_series(PROJECT_ID)
-    if args.command == 'list-time-series':
+    if args.command == "list-time-series":
         list_time_series(PROJECT_ID)
-    if args.command == 'list-time-series-header':
+    if args.command == "list-time-series-header":
         list_time_series_header(PROJECT_ID)
-    if args.command == 'list-time-series-reduce':
+    if args.command == "list-time-series-reduce":
         list_time_series_reduce(PROJECT_ID)
-    if args.command == 'list-time-series-aggregate':
+    if args.command == "list-time-series-aggregate":
         list_time_series_aggregate(PROJECT_ID)
diff --git a/monitoring/snippets/v3/cloud-client/snippets_test.py b/monitoring/snippets/v3/cloud-client/snippets_test.py
index 5aabbda83922..d6c7d07adf4b 100644
--- a/monitoring/snippets/v3/cloud-client/snippets_test.py
+++ b/monitoring/snippets/v3/cloud-client/snippets_test.py
@@ -23,14 +23,14 @@
 import snippets
 
 
-PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT']
+PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
 
 
 @pytest.fixture(scope="function")
 def custom_metric_descriptor(capsys):
     snippets.create_metric_descriptor(PROJECT_ID)
     out, _ = capsys.readouterr()
-    match = re.search(r'Created (.*)\.', out)
+    match = re.search(r"Created (.*)\.", out)
     metric_name = match.group(1)
     yield metric_name
 
@@ -43,7 +43,6 @@ def custom_metric_descriptor(capsys):
 
 @pytest.fixture(scope="module")
 def write_time_series():
-
     @backoff.on_exception(backoff.expo, InternalServerError, max_time=120)
     def write():
         snippets.write_time_series(PROJECT_ID)
@@ -54,64 +53,63 @@ def write():
 
 def test_get_delete_metric_descriptor(capsys, custom_metric_descriptor):
     try:
-        @backoff.on_exception(
-            backoff.expo, (AssertionError, NotFound), max_time=60)
+
+        @backoff.on_exception(backoff.expo, (AssertionError, NotFound), max_time=60)
         def eventually_consistent_test():
             snippets.get_metric_descriptor(custom_metric_descriptor)
             out, _ = capsys.readouterr()
-            assert 'DOUBLE' in out
+            assert "DOUBLE" in out
 
         eventually_consistent_test()
 
     finally:
         snippets.delete_metric_descriptor(custom_metric_descriptor)
         out, _ = capsys.readouterr()
-        assert 'Deleted metric' in out
+        assert "Deleted metric" in out
 
 
 def test_list_metric_descriptors(capsys):
     snippets.list_metric_descriptors(PROJECT_ID)
     out, _ = capsys.readouterr()
-    assert 'logging.googleapis.com/byte_count' in out
+    assert "logging.googleapis.com/byte_count" in out
 
 
 def test_list_resources(capsys):
     snippets.list_monitored_resources(PROJECT_ID)
     out, _ = capsys.readouterr()
-    assert 'pubsub_topic' in out
+    assert "pubsub_topic" in out
 
 
 def test_get_resources(capsys):
-    snippets.get_monitored_resource_descriptor(
-        PROJECT_ID, 'pubsub_topic')
+    snippets.get_monitored_resource_descriptor(PROJECT_ID, "pubsub_topic")
     out, _ = capsys.readouterr()
-    assert 'A topic in Google Cloud Pub/Sub' in out
+    assert "A topic in Google Cloud Pub/Sub" in out
 
 
 def test_list_time_series(capsys, write_time_series):
     snippets.list_time_series(PROJECT_ID)
     out, _ = capsys.readouterr()
-    assert 'gce_instance' in out
+    assert "gce_instance" in out
 
 
 def test_list_time_series_header(capsys, write_time_series):
     snippets.list_time_series_header(PROJECT_ID)
     out, _ = capsys.readouterr()
-    assert 'gce_instance' in out
+    assert "gce_instance" in out
 
 
 def test_list_time_series_aggregate(capsys, write_time_series):
     snippets.list_time_series_aggregate(PROJECT_ID)
     out, _ = capsys.readouterr()
-    assert 'points' in out
-    assert 'interval' in out
-    assert 'start_time' in out
-    assert 'end_time' in out
+    assert "points" in out
+    assert "interval" in out
+    assert "start_time" in out
+    assert "end_time" in out
 
 
 def test_list_time_series_reduce(capsys, write_time_series):
     snippets.list_time_series_reduce(PROJECT_ID)
     out, _ = capsys.readouterr()
-    assert 'points' in out
-    assert 'interval' in out
-    assert 'start_time' in out
-    assert 'end_time' in out
+    assert "points" in out
+    assert "interval" in out
+    assert "start_time" in out
+    assert "end_time" in out
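
These tests lean on a retry idiom for eventually consistent reads: re-run the assertion under exponential backoff until it passes or a deadline expires. A generic sketch of the pattern (the probe function is hypothetical):

    import backoff

    @backoff.on_exception(backoff.expo, AssertionError, max_time=120)
    def eventually_consistent_check():
        # read_output() stands in for whatever produces the observed state.
        out = read_output()
        assert "gce_instance" in out

    eventually_consistent_check()
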
diff --git a/monitoring/snippets/v3/uptime-check-client/snippets.py b/monitoring/snippets/v3/uptime-check-client/snippets.py
index dcde3b58650d..0970b8e7b8ca 100644
--- a/monitoring/snippets/v3/uptime-check-client/snippets.py
+++ b/monitoring/snippets/v3/uptime-check-client/snippets.py
@@ -19,48 +19,51 @@
 import pprint
 
 from google.cloud import monitoring_v3
+from google.protobuf import field_mask_pb2
 import tabulate
 
 
 # [START monitoring_uptime_check_create]
 def create_uptime_check_config_get(project_name, host_name=None, display_name=None):
-    config = monitoring_v3.types.uptime_pb2.UptimeCheckConfig()
+    config = monitoring_v3.UptimeCheckConfig()
     config.display_name = display_name or "New GET uptime check"
-    config.monitored_resource.type = "uptime_url"
-    config.monitored_resource.labels.update({"host": host_name or "example.com"})
-    config.http_check.request_method = (
-        monitoring_v3.enums.UptimeCheckConfig.HttpCheck.RequestMethod.GET
-    )
-    config.http_check.path = "/"
-    config.http_check.port = 80
-    config.timeout.seconds = 10
-    config.period.seconds = 300
+    config.monitored_resource = {
+        "type": "uptime_url",
+        "labels": {"host": host_name or "example.com"}
+    }
+    config.http_check = {
+        "request_method": monitoring_v3.UptimeCheckConfig.HttpCheck.RequestMethod.GET,
+        "path": "/",
+        "port": 80
+    }
+    config.timeout = {"seconds": 10}
+    config.period = {"seconds": 300}
 
     client = monitoring_v3.UptimeCheckServiceClient()
-    new_config = client.create_uptime_check_config(project_name, config)
+    new_config = client.create_uptime_check_config(request={"parent": project_name, "uptime_check_config": config})
     pprint.pprint(new_config)
     return new_config
 
 
 def create_uptime_check_config_post(project_name, host_name=None, display_name=None):
-    config = monitoring_v3.types.uptime_pb2.UptimeCheckConfig()
+    config = monitoring_v3.UptimeCheckConfig()
     config.display_name = display_name or "New POST uptime check"
-    config.monitored_resource.type = "uptime_url"
-    config.monitored_resource.labels.update({"host": host_name or "example.com"})
-    config.http_check.request_method = (
-        monitoring_v3.enums.UptimeCheckConfig.HttpCheck.RequestMethod.POST
-    )
-    config.http_check.content_type = (
-        monitoring_v3.enums.UptimeCheckConfig.HttpCheck.ContentType.URL_ENCODED
-    )
-    config.http_check.body = "foo=bar".encode("utf-8")
-    config.http_check.path = "/"
-    config.http_check.port = 80
-    config.timeout.seconds = 10
-    config.period.seconds = 300
+    config.monitored_resource = {
+        "type": "uptime_url",
+        "labels": {"host": host_name or "example.com"}
+    }
+    config.http_check = {
+        "request_method": monitoring_v3.UptimeCheckConfig.HttpCheck.RequestMethod.POST,
+        "content_type": monitoring_v3.UptimeCheckConfig.HttpCheck.ContentType.URL_ENCODED,
+        "body": "foo=bar".encode("utf-8"),
+        "path": "/",
+        "port": 80
+    }
+    config.timeout = {"seconds": 10}
+    config.period = {"seconds": 300}
 
     client = monitoring_v3.UptimeCheckServiceClient()
-    new_config = client.create_uptime_check_config(project_name, config)
+    new_config = client.create_uptime_check_config(request={"parent": project_name, "uptime_check_config": config})
     pprint.pprint(new_config)
     return new_config
@@ -72,15 +75,15 @@ def update_uptime_check_config(
     config_name, new_display_name=None, new_http_check_path=None
 ):
     client = monitoring_v3.UptimeCheckServiceClient()
-    config = client.get_uptime_check_config(config_name)
-    field_mask = monitoring_v3.types.FieldMask()
+    config = client.get_uptime_check_config(request={"name": config_name})
+    field_mask = field_mask_pb2.FieldMask()
     if new_display_name:
         field_mask.paths.append("display_name")
         config.display_name = new_display_name
     if new_http_check_path:
         field_mask.paths.append("http_check.path")
         config.http_check.path = new_http_check_path
-    client.update_uptime_check_config(config, field_mask)
+    client.update_uptime_check_config(request={"uptime_check_config": config, "update_mask": field_mask})
 
 
 # [END monitoring_uptime_check_update]
@@ -89,7 +92,7 @@ def update_uptime_check_config(
 # [START monitoring_uptime_check_list_configs]
 def list_uptime_check_configs(project_name):
     client = monitoring_v3.UptimeCheckServiceClient()
-    configs = client.list_uptime_check_configs(project_name)
+    configs = client.list_uptime_check_configs(request={"parent": project_name})
 
     for config in configs:
         pprint.pprint(config)
@@ -101,7 +104,7 @@ def list_uptime_check_configs(project_name):
 # [START monitoring_uptime_check_list_ips]
 def list_uptime_check_ips():
     client = monitoring_v3.UptimeCheckServiceClient()
-    ips = client.list_uptime_check_ips()
+    ips = client.list_uptime_check_ips(request={})
     print(
         tabulate.tabulate(
             [(ip.region, ip.location, ip.ip_address) for ip in ips],
@@ -116,7 +119,7 @@ def list_uptime_check_ips():
 # [START monitoring_uptime_check_get]
 def get_uptime_check_config(config_name):
     client = monitoring_v3.UptimeCheckServiceClient()
-    config = client.get_uptime_check_config(config_name)
+    config = client.get_uptime_check_config(request={"name": config_name})
     pprint.pprint(config)
@@ -128,7 +131,7 @@ def get_uptime_check_config(config_name):
 # See https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.uptimeCheckConfigs#UptimeCheckConfig.
 def delete_uptime_check_config(config_name):
     client = monitoring_v3.UptimeCheckServiceClient()
-    client.delete_uptime_check_config(config_name)
+    client.delete_uptime_check_config(request={"name": config_name})
     print("Deleted ", config_name)
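
Taken together, the migrated uptime snippets compose as in this rough end-to-end sketch (the project ID is a placeholder; the check targets example.com, matching the defaults above):

    from google.cloud import monitoring_v3

    client = monitoring_v3.UptimeCheckServiceClient()
    parent = "projects/my-project"

    config = monitoring_v3.UptimeCheckConfig(
        display_name="sketch check",
        monitored_resource={"type": "uptime_url", "labels": {"host": "example.com"}},
        http_check={"path": "/", "port": 80},
        timeout={"seconds": 10},
        period={"seconds": 300},
    )
    created = client.create_uptime_check_config(
        request={"parent": parent, "uptime_check_config": config}
    )
    print("Created", created.name)

    # Clean up the check we just created.
    client.delete_uptime_check_config(request={"name": created.name})
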